diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index e4f7a3e..0000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(sudo docker ps:*)", - "Bash(sudo -S docker ps:*)", - "Bash(env)", - "Bash(docker inspect --help:*)", - "Bash(docker info:*)", - "Bash(docker version:*)", - "Bash(sudo docker version:*)", - "Bash(go build:*)" - ] - } -} diff --git a/.gitignore b/.gitignore index 4e44143..1cbcf55 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ +.claude .envrc /sds \ No newline at end of file diff --git a/.reflex b/.reflex index 47b3acf..c61e746 100644 --- a/.reflex +++ b/.reflex @@ -1,8 +1,23 @@ +-r "^$" -s -R "devel/.*" -- sh -c \ + 'echo "Starting Devenv..." && \ + DLOG="${DLOG:-warn}" ./devel/sds devenv' --r "\.(go|sql)$" -s -R "devel/.*" -- sh -c \ - 'echo "Starting Consumer Sidecar ..." && \ - DLOG=".*=debug" ./devel/sds consumer sidecar' +# -r "\.(go|sql)$" -s -R "devel/.*" -- sh -c \ +# 'echo "Starting Consumer Sidecar ..." && \ +# DLOG=".*=debug" ./devel/sds consumer sidecar' -r "\.(go|sql)$" -s -R "devel/.*" -- sh -c \ 'echo "Starting Provider Sidecar ..." && \ - DLOG=".*=debug" ./devel/sds provider sidecar' \ No newline at end of file + touch ./devel/.provider-sidecar && \ + DLOG=".*=debug" ./devel/sds provider sidecar \ + --grpc-listen-addr=:9001 \ + --service-provider=0xa6f1845e54b1d6a95319251f1ca775b4ad406cdf \ + --collector-address=0x1d01649b4f94722b55b5c3b3e10fe26cd90c1ba9 \ + --escrow-address=0xfc7487a37ca8eac2e64cba61277aa109e9b8631e \ + --rpc-endpoint=http://localhost:58545' + +-r "(\.provider-sidecar|firecore\.config\.yaml)$" -s -R "devel/.*" -- sh -c \ + 'echo "Restarting firehose-core instance (5s delay for sidecar startup)..." && \ + sleep 5 && \ + rm -rf ./devel/.firehose && \ + DLOG="${DLOG:-error}" firecore -c devel/firecore.config.yaml -d ./devel/.firehose start' diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..49638e3 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,18 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
+ +## Unreleased + +### Added + +- Add `sds://` scheme plugins for firehose-core integration (`provider/plugin` package) + - `plugin.RegisterAuth()` - registers `sds://` with dauth for RAV-based authentication + - `plugin.RegisterSession()` - registers `sds://` with dsession for worker pool management + - `plugin.RegisterMetering()` - registers `sds://` with dmetering for usage tracking + - `plugin.Register()` - convenience function to register all three plugins at once +- Plugins are gRPC/Connect clients that connect to the provider sidecar +- All business logic (service provider, escrow, quotas) is configured on the sidecar, not the plugin +- Plugin configuration is minimal: `sds://host:port?plaintext=true&network=my-network` diff --git a/cmd/sds/main.go b/cmd/sds/main.go index ca6cf26..bea2d88 100644 --- a/cmd/sds/main.go +++ b/cmd/sds/main.go @@ -35,5 +35,7 @@ func main() { consumerSidecarCmd, consumerFakeClientCmd, ), + + toolsCmd, ) } diff --git a/cmd/sds/provider_sidecar.go b/cmd/sds/provider_sidecar.go index 99c64ce..30d5540 100644 --- a/cmd/sds/provider_sidecar.go +++ b/cmd/sds/provider_sidecar.go @@ -34,7 +34,7 @@ var providerSidecarCmd = Command( price_per_byte: "0.0000000001" # Price per byte transferred in GRT `), Flags(func(flags *pflag.FlagSet) { - flags.String("grpc-listen-addr", ":9001", "gRPC server listen address") + flags.String("grpc-listen-addr", ":9001", "gRPC server listen address for Connect/HTTP services") flags.String("service-provider", "", "Service provider address (required)") flags.Uint64("chain-id", 1337, "Chain ID for EIP-712 domain") flags.String("collector-address", "", "Collector contract address for EIP-712 domain (required)") diff --git a/cmd/sds/tools_rav.go b/cmd/sds/tools_rav.go new file mode 100644 index 0000000..7efecbc --- /dev/null +++ b/cmd/sds/tools_rav.go @@ -0,0 +1,262 @@ +package main + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "math/big" + "strings" + "time" + + "github.com/graphprotocol/substreams-data-service/horizon" + commonv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/common/v1" + "github.com/graphprotocol/substreams-data-service/sidecar" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/streamingfast/cli" + . "github.com/streamingfast/cli" + "github.com/streamingfast/cli/sflags" + "github.com/streamingfast/eth-go" + "google.golang.org/protobuf/proto" +) + +// GRT token definition (18 decimals like ETH) +var GRTToken = ð.Token{ + Name: "Graph Token", + Symbol: "GRT", + Decimals: 18, +} + +var toolsCmd = Group( + "tools", + "Development and debugging tools", + toolsRAVCmd, +) + +var toolsRAVCmd = Group( + "rav", + "RAV (Receipt Aggregate Voucher) tools", + toolsRAVCreateCmd, +) + +var toolsRAVCreateCmd = Command( + runToolsRAVCreate, + "create", + "Create a signed RAV for testing", + Description(` + Creates a signed RAV (Receipt Aggregate Voucher) that can be used to + authenticate requests to a provider. + + The output is a base64-encoded protobuf that can be used as the + x-sds-rav header value. 
+ + Example usage: + sds tools rav create \ + --payer=0xe90874856c339d5d3733c92ea5acadc6014b34d5 \ + --service-provider=0xa6f1845e54b1d6a95319251f1ca775b4ad406cdf \ + --data-service=0x37478fd2f5845e3664fe4155d74c00e1a4e7a5e2 \ + --collector-address=0x1d01649b4f94722b55b5c3b3e10fe26cd90c1ba9 \ + --signer-key=0xe4c2694501255921b6588519cfd36d4e86ddc4ce19ab1bc91d9c58057c040304 \ + --value="1 GRT" + + Value formats: + - "1 GRT" or "1GRT" (1 GRT = 1e18) + - "0.5 GRT" (0.5 GRT = 5e17) + - "1000000000000000000" (raw, 18 decimals) + + Use as header: + grpcurl -H "x-sds-rav: " ... + `), + Flags(func(flags *pflag.FlagSet) { + flags.String("payer", "", "Payer address (required)") + flags.String("service-provider", "", "Service provider address (required)") + flags.String("data-service", "", "Data service contract address (required)") + flags.String("collector-address", "", "Collector contract address for EIP-712 domain (required)") + flags.Uint64("chain-id", 1337, "Chain ID for EIP-712 domain") + flags.String("signer-key", "", "Private key to sign the RAV (hex, with or without 0x prefix) (required)") + flags.String("value", "1 GRT", "Value aggregate: '10 GRT', '0.5GRT', or raw like '1000000000000000000'") + flags.String("collection-id", "", "Collection ID (32 bytes hex). If empty, a random one is generated") + }), +) + +func runToolsRAVCreate(cmd *cobra.Command, args []string) error { + payerHex := sflags.MustGetString(cmd, "payer") + serviceProviderHex := sflags.MustGetString(cmd, "service-provider") + dataServiceHex := sflags.MustGetString(cmd, "data-service") + collectorHex := sflags.MustGetString(cmd, "collector-address") + chainID := sflags.MustGetUint64(cmd, "chain-id") + signerKeyHex := sflags.MustGetString(cmd, "signer-key") + valueStr := sflags.MustGetString(cmd, "value") + collectionIDHex := sflags.MustGetString(cmd, "collection-id") + + // Validate required fields + cli.Ensure(payerHex != "", "--payer is required") + cli.Ensure(serviceProviderHex != "", "--service-provider is required") + cli.Ensure(dataServiceHex != "", "--data-service is required") + cli.Ensure(collectorHex != "", "--collector-address is required") + cli.Ensure(signerKeyHex != "", "--signer-key is required") + + // Parse addresses + payer, err := eth.NewAddress(payerHex) + cli.NoError(err, "invalid --payer address %q", payerHex) + + serviceProvider, err := eth.NewAddress(serviceProviderHex) + cli.NoError(err, "invalid --service-provider address %q", serviceProviderHex) + + dataService, err := eth.NewAddress(dataServiceHex) + cli.NoError(err, "invalid --data-service address %q", dataServiceHex) + + collector, err := eth.NewAddress(collectorHex) + cli.NoError(err, "invalid --collector-address address %q", collectorHex) + + // Parse signer key + signerKey, err := eth.NewPrivateKey(signerKeyHex) + cli.NoError(err, "invalid --signer-key %q", signerKeyHex) + + // Parse value (supports "10 GRT", "0.5GRT", or raw) + value, err := parseGRTValue(valueStr) + cli.NoError(err, "invalid --value %q", valueStr) + + // Parse or generate collection ID + var collectionID horizon.CollectionID + if collectionIDHex != "" { + h, err := eth.NewHash(collectionIDHex) + cli.NoError(err, "invalid --collection-id %q", collectionIDHex) + copy(collectionID[:], h) + } else { + // Generate random collection ID + if _, err := rand.Read(collectionID[:]); err != nil { + return fmt.Errorf("generating random collection ID: %w", err) + } + } + + // Create the RAV + rav := &horizon.RAV{ + CollectionID: collectionID, + Payer: payer, + ServiceProvider: 
serviceProvider, + DataService: dataService, + TimestampNs: uint64(time.Now().UnixNano()), + ValueAggregate: value, + Metadata: nil, + } + + // Create the EIP-712 domain + domain := horizon.NewDomain(chainID, collector) + + // Sign the RAV + signedRAV, err := horizon.Sign(domain, rav, signerKey) + if err != nil { + return fmt.Errorf("signing RAV: %w", err) + } + + // Convert to proto + protoSignedRAV := sidecar.HorizonSignedRAVToProto(signedRAV) + + // Encode as protobuf + protoBytes, err := proto.Marshal(protoSignedRAV) + if err != nil { + return fmt.Errorf("marshaling proto: %w", err) + } + + // Encode as base64 + base64Encoded := base64.StdEncoding.EncodeToString(protoBytes) + + // Print info + fmt.Println("RAV Details:") + fmt.Printf(" Collection ID: %s\n", eth.Hash(collectionID[:]).Pretty()) + fmt.Printf(" Payer: %s\n", payer.Pretty()) + fmt.Printf(" Service Provider: %s\n", serviceProvider.Pretty()) + fmt.Printf(" Data Service: %s\n", dataService.Pretty()) + fmt.Printf(" Value Aggregate: %s (raw: %s)\n", formatGRT(value), value.String()) + fmt.Printf(" Timestamp: %d\n", rav.TimestampNs) + fmt.Printf(" Signer: %s\n", signerKey.PublicKey().Address().Pretty()) + fmt.Println() + fmt.Println("EIP-712 Domain:") + fmt.Printf(" Name: %s\n", domain.Name) + fmt.Printf(" Version: %s\n", domain.Version) + fmt.Printf(" Chain ID: %d\n", chainID) + fmt.Printf(" Verifying Contract: %s\n", collector.Pretty()) + fmt.Println() + fmt.Println("Base64-encoded SignedRAV (for x-sds-rav header):") + fmt.Println(base64Encoded) + + return nil +} + +// parseGRTValue parses a value string that can be: +// - "10 GRT" or "10GRT" (with optional space) +// - "0.5 GRT" (decimal GRT) +// - "1000000000000000000" (raw, 18 decimals) +func parseGRTValue(s string) (*big.Int, error) { + s = strings.TrimSpace(s) + + // Check for GRT suffix (case-insensitive) + lower := strings.ToLower(s) + if strings.HasSuffix(lower, "grt") { + // Remove "grt" suffix and trim + numStr := strings.TrimSpace(s[:len(s)-3]) + return parseDecimalToBigInt(numStr, GRTToken.Decimals) + } + + // Try parsing as raw integer + value, ok := new(big.Int).SetString(s, 10) + if !ok { + return nil, fmt.Errorf("invalid value: must be a number with optional 'GRT' suffix (e.g., '10 GRT', '0.5GRT') or raw integer") + } + return value, nil +} + +// parseDecimalToBigInt parses a decimal string and converts to big.Int with given decimals +// e.g., "1.5" with decimals=18 -> 1500000000000000000 +func parseDecimalToBigInt(s string, decimals uint) (*big.Int, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, fmt.Errorf("empty value") + } + + // Split on decimal point + parts := strings.Split(s, ".") + if len(parts) > 2 { + return nil, fmt.Errorf("invalid number: multiple decimal points") + } + + // Parse integer part + intPart, ok := new(big.Int).SetString(parts[0], 10) + if !ok { + return nil, fmt.Errorf("invalid integer part: %s", parts[0]) + } + + // Multiply by 10^decimals + multiplier := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(decimals)), nil) + result := new(big.Int).Mul(intPart, multiplier) + + // Handle fractional part if present + if len(parts) == 2 { + fracStr := parts[1] + if len(fracStr) > int(decimals) { + return nil, fmt.Errorf("too many decimal places: max %d", decimals) + } + + // Pad with zeros to match decimals + fracStr = fracStr + strings.Repeat("0", int(decimals)-len(fracStr)) + + fracPart, ok := new(big.Int).SetString(fracStr, 10) + if !ok { + return nil, fmt.Errorf("invalid fractional part: %s", parts[1]) + } + + // Add 
fractional part (already scaled) + result.Add(result, fracPart) + } + + return result, nil +} + +// formatGRT formats a raw value as GRT with up to 6 decimal places +func formatGRT(raw *big.Int) string { + return GRTToken.AmountBig(raw).Format(6) +} + +// Ensure proto import is used +var _ = commonv1.SignedRAV{} diff --git a/devel/.gitignore b/devel/.gitignore new file mode 100644 index 0000000..366d388 --- /dev/null +++ b/devel/.gitignore @@ -0,0 +1,10 @@ +# Compiled binaries +sds + +# State files created during devenv operation +.provider-sidecar +.consumer-sidecar +.firehose/ + +# Any temporary data +*.tmp diff --git a/devel/firecore.config.yaml b/devel/firecore.config.yaml new file mode 100644 index 0000000..50b6d04 --- /dev/null +++ b/devel/firecore.config.yaml @@ -0,0 +1,35 @@ +start: + args: + - reader-node + - merger + - relayer + - firehose + - substreams-tier1 + - substreams-tier2 + flags: + advertise-block-id-encoding: "hex" + advertise-chain-name: "acme-dummy-blockchain" + + # SDS Provider Sidecar plugin configuration + # The provider sidecar runs on :9001 (Connect) and serves all SDS services + # Dev API key allows bypassing RAV authentication for local testing + common-auth-plugin: "sds://localhost:9001?plaintext=true&dev-api-key=dev-test-key" + common-session-plugin: "sds://localhost:9001?plaintext=true" + common-metering-plugin: "sds://localhost:9001?plaintext=true&network=sds-dummy-blockchain" + + # Install dummy-blockchain with: go install github.com/streamingfast/dummy-blockchain@latest + reader-node-path: "dummy-blockchain" + reader-node-data-dir: "{data-dir}/reader-node" + reader-node-arguments: start + --log-level=error + --tracer=firehose + --store-dir="{node-data-dir}" + --block-rate=15 + --genesis-block-burst=2 + --genesis-height=0 + --server-addr=:9777 + + # The * suffix makes the server listen in TLS with a self-signed certificate, required + # for the server side to correctly fetch the Authorization header from the client + substreams-tier1-grpc-listen-addr: ":10016*" + firehose-grpc-listen-addr: ":10015*" diff --git a/go.mod b/go.mod index 238a8df..d3dac5f 100644 --- a/go.mod +++ b/go.mod @@ -1,28 +1,36 @@ module github.com/graphprotocol/substreams-data-service -go 1.24.0 +go 1.24.2 toolchain go1.24.4 require ( connectrpc.com/connect v1.19.1 + github.com/alphadose/haxmap v1.4.1 github.com/google/uuid v1.6.0 github.com/spf13/cobra v1.1.3 github.com/spf13/pflag v1.0.5 github.com/streamingfast/cli v0.0.4-0.20250815192146-d8a233ec3d0b + github.com/streamingfast/dauth v0.0.0-20251218134044-fb716c7172b4 github.com/streamingfast/dgrpc v0.0.0-20251218142640-027692a12722 + github.com/streamingfast/dmetering v0.0.0-20251027175535-4fd530934b97 + github.com/streamingfast/dsession v0.0.0-20251029144057-b94d1030e142 github.com/streamingfast/eth-go v0.0.0-20260216202159-4e2b7501894a github.com/streamingfast/logging v0.0.0-20260108192805-38f96de0a641 github.com/streamingfast/shutter v1.5.0 + github.com/streamingfast/worker-pool-protocol v0.0.0-20251029142144-b539534f3eb1 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.40.0 go.uber.org/zap v1.27.1 golang.org/x/crypto v0.47.0 + golang.org/x/net v0.48.0 + google.golang.org/grpc v1.77.0 google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v3 v3.0.1 ) require ( + cel.dev/expr v0.24.0 // indirect cloud.google.com/go/auth v0.16.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect @@ -48,6 +56,7 @@ require ( 
github.com/charmbracelet/lipgloss v1.0.0 // indirect github.com/charmbracelet/x/ansi v0.4.2 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect + github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect @@ -60,8 +69,11 @@ require ( github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.8.4 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -71,6 +83,7 @@ require ( github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.3.2 // indirect @@ -104,6 +117,7 @@ require ( github.com/paulbellamy/ratecounter v0.2.0 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.16.0 // indirect @@ -114,10 +128,11 @@ require ( github.com/rs/cors v1.8.3 // indirect github.com/shirou/gopsutil/v4 v4.25.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.9.3 // indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.15.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/streamingfast/dmetrics v0.0.0-20250711072030-f023e918a175 // indirect github.com/streamingfast/sf-tracing v0.0.0-20251218140752-bafd5572499f // indirect github.com/subosito/gotenv v1.4.2 // indirect @@ -141,8 +156,9 @@ require ( go.opentelemetry.io/otel/trace v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/atomic v1.11.0 // indirect + go.uber.org/mock v0.5.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 // indirect golang.org/x/oauth2 v0.32.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.40.0 // indirect @@ -152,6 +168,5 @@ require ( google.golang.org/api v0.249.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/grpc v1.77.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) diff --git a/go.sum b/go.sum index 34317e6..a6ee8a1 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod 
h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -83,6 +85,8 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alphadose/haxmap v1.4.1 h1:VtD6VCxUkjNIfJk/aWdYFfOzrRddDFjmvmRmILg7x8Q= +github.com/alphadose/haxmap v1.4.1/go.mod h1:rjHw1IAqbxm0S3U5tD16GoKsiAd8FWx5BJ2IYqXwgmM= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -170,8 +174,11 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= @@ -187,6 +194,8 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -201,6 +210,7 @@ github.com/go-ole/go-ole v1.2.6/go.mod 
h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -277,6 +287,7 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= @@ -463,8 +474,8 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= @@ -479,12 +490,20 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/streamingfast/cli v0.0.4-0.20250815192146-d8a233ec3d0b h1:ztYeX3/5rg2tV2EU7edcrcHzMz6wUbdJB+LqCrP5W8s= github.com/streamingfast/cli v0.0.4-0.20250815192146-d8a233ec3d0b/go.mod h1:o9R/tjNON01X2mgWL5qirl2MV6xQ4EZI5D504ST3K/M= +github.com/streamingfast/dauth v0.0.0-20251218134044-fb716c7172b4 h1:qJmiXzqs3T7124+FXEnhPgK/3fbSjqjQuBTdIrUk4Oc= +github.com/streamingfast/dauth v0.0.0-20251218134044-fb716c7172b4/go.mod h1:4g4HFFqtlmdK1FIxnVToetc7unBRAGRSjc4VArvUmbM= github.com/streamingfast/dgrpc 
v0.0.0-20251218142640-027692a12722 h1:gZcPR64H5aWs49bLDoNTjhsedTfva7fAIei891LL0C8= github.com/streamingfast/dgrpc v0.0.0-20251218142640-027692a12722/go.mod h1:NbkvenEHfjQpUBRHTCp/tp0Ayjp5hDzzkv/Ve9Uka1I= +github.com/streamingfast/dmetering v0.0.0-20251027175535-4fd530934b97 h1:rXZYa87AFt+kluwe4McC9jZgx/eRNSlOkWQaIj9Yv4w= +github.com/streamingfast/dmetering v0.0.0-20251027175535-4fd530934b97/go.mod h1:UqWuX3REU/IInBUaymFN2eLjuvz+/0SsoUFjeQlLNyI= github.com/streamingfast/dmetrics v0.0.0-20250711072030-f023e918a175 h1:pqgbZm2Agu9uoU4MIzTHpLunUB3IppREGL8itDrnEeo= github.com/streamingfast/dmetrics v0.0.0-20250711072030-f023e918a175/go.mod h1:JbxEDbzWRG1dHdNIPrYfuPllEkktZMgm40AwVIBENcw= +github.com/streamingfast/dsession v0.0.0-20251029144057-b94d1030e142 h1:mgPawX0VOgrG2LHSc6EfY15r0x3AO/GK6LY58io3/+4= +github.com/streamingfast/dsession v0.0.0-20251029144057-b94d1030e142/go.mod h1:ldDwFqr20xXyaLhwDm7XlMaf2vvoZNOrjgsMcFJGtrw= github.com/streamingfast/eth-go v0.0.0-20260216202159-4e2b7501894a h1:jHKRM0gM5Gh/vRzJTv5u0myac8zxlGgICZr71+QRPz0= github.com/streamingfast/eth-go v0.0.0-20260216202159-4e2b7501894a/go.mod h1:1szIYZI+rTjWWVYd1kFJomFiPCMvcZfBmylCwRaK8yY= github.com/streamingfast/logging v0.0.0-20260108192805-38f96de0a641 h1:dI+b2TyFS0rJw1xVGzxBBvn/RrSgBpqM/RjwDInxEfo= @@ -493,6 +512,8 @@ github.com/streamingfast/sf-tracing v0.0.0-20251218140752-bafd5572499f h1:KXE5ME github.com/streamingfast/sf-tracing v0.0.0-20251218140752-bafd5572499f/go.mod h1:H57LMxdkHi0MZ+n7xr7zo9PJv7kZZ38du9w0SizLcFU= github.com/streamingfast/shutter v1.5.0 h1:NpzDYzj0HVpSiDJVO/FFSL6QIK/YKOxY0gJAtyaTOgs= github.com/streamingfast/shutter v1.5.0/go.mod h1:B/T6efqdeMGbGwjzPS1ToXzYZI4kDzI5/u4I+7qbjY8= +github.com/streamingfast/worker-pool-protocol v0.0.0-20251029142144-b539534f3eb1 h1:aoPCoeTHwCQbzRwLBpT45GUvZgAzevIUBgPZjJzBbug= +github.com/streamingfast/worker-pool-protocol v0.0.0-20251029142144-b539534f3eb1/go.mod h1:fq44dFx8K9S3/9QOoIVk0s+ptEscfpQmftLog/9WYU4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -574,6 +595,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -588,7 +611,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -601,6 +624,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 h1:QfTh0HpN6hlw6D3vu8DAwC8pBIwikq0AI1evdm+FksE= +golang.org/x/exp v0.0.0-20221031165847-c99f073a8326/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -659,6 +684,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -725,6 +751,7 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -746,6 +773,8 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= golang.org/x/text 
v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/horizon/devenv/devenv.go b/horizon/devenv/devenv.go index 57f99d9..3e9370b 100644 --- a/horizon/devenv/devenv.go +++ b/horizon/devenv/devenv.go @@ -104,14 +104,14 @@ func start(ctx context.Context, opts ...Option) (*Env, error) { collector := mustLoadContract("GraphTallyCollector") dataService := mustLoadContract("SubstreamsDataService") - // Start Anvil container + // Start Anvil container with fixed port binding report("Starting Anvil container...") anvilReq := testcontainers.ContainerRequest{ Image: "ghcr.io/foundry-rs/foundry:latest", Cmd: []string{ fmt.Sprintf("anvil --host 0.0.0.0 --port 8545 --chain-id %d", config.ChainID), }, - ExposedPorts: []string{"8545/tcp"}, + ExposedPorts: []string{fmt.Sprintf("%d:8545/tcp", config.RPCPort)}, WaitingFor: wait.ForListeningPort("8545/tcp"). WithStartupTimeout(60 * time.Second), } @@ -126,17 +126,8 @@ func start(ctx context.Context, opts ...Option) (*Env, error) { return nil, fmt.Errorf("starting anvil container: %w", err) } - mappedPort, err := anvilContainer.MappedPort(ctx, "8545/tcp") - if err != nil { - zlog.Error("failed to get mapped port", zap.Error(err)) - anvilContainer.Terminate(ctx) - cancel() - return nil, fmt.Errorf("getting mapped port: %w", err) - } - - // Use localhost for Docker published ports - using container.Host() may return - // an IP that goes through a proxy and gets blocked - rpcURL := fmt.Sprintf("http://localhost:%s", mappedPort.Port()) + // Use the fixed port from config for the RPC URL + rpcURL := fmt.Sprintf("http://localhost:%d", config.RPCPort) zlog.Info("Anvil RPC endpoint ready", zap.String("rpc_url", rpcURL)) // Create RPC client diff --git a/horizon/devenv/options.go b/horizon/devenv/options.go index 46b8e62..b96c02c 100644 --- a/horizon/devenv/options.go +++ b/horizon/devenv/options.go @@ -16,6 +16,8 @@ func (NoopReporter) ReportProgress(message string) {} type Config struct { // ChainID is the chain ID for the Anvil network (default: 1337) ChainID uint64 + // RPCPort is the fixed host port for the Anvil RPC endpoint (default: 58545) + RPCPort int // EscrowAmount is the default amount to deposit in escrow (default: 10,000 GRT) EscrowAmount *big.Int // ProvisionAmount is the default provision amount (default: 1,000 GRT) @@ -34,6 +36,7 @@ func DefaultConfig() *Config { return &Config{ ChainID: 1337, + RPCPort: 58545, EscrowAmount: escrow, ProvisionAmount: provision, Reporter: NoopReporter{}, @@ -50,6 +53,13 @@ func WithChainID(chainID uint64) Option { } } +// WithRPCPort sets the fixed host port for the Anvil RPC endpoint +func WithRPCPort(port int) Option { + return func(c *Config) { + c.RPCPort = port + } +} + // WithEscrowAmount sets the default escrow amount func WithEscrowAmount(amount *big.Int) Option { return func(c *Config) { diff --git a/pb/graph/substreams/data_service/sds/auth/v1/auth.pb.go b/pb/graph/substreams/data_service/sds/auth/v1/auth.pb.go new file mode 100644 index 0000000..a489885 --- /dev/null +++ b/pb/graph/substreams/data_service/sds/auth/v1/auth.pb.go @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: graph/substreams/data_service/sds/auth/v1/auth.proto + +package authv1 + +import ( + v1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ValidateAuthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // SignedRAV extracted from the `x-sds-rav` gRPC header + PaymentRav *v1.SignedRAV `protobuf:"bytes,1,opt,name=payment_rav,json=paymentRav,proto3" json:"payment_rav,omitempty"` + // Client IP address (optional, for logging/rate-limiting) + IpAddress string `protobuf:"bytes,2,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Request path/endpoint (optional) + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateAuthRequest) Reset() { + *x = ValidateAuthRequest{} + mi := &file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateAuthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateAuthRequest) ProtoMessage() {} + +func (x *ValidateAuthRequest) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateAuthRequest.ProtoReflect.Descriptor instead. +func (*ValidateAuthRequest) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescGZIP(), []int{0} +} + +func (x *ValidateAuthRequest) GetPaymentRav() *v1.SignedRAV { + if x != nil { + return x.PaymentRav + } + return nil +} + +func (x *ValidateAuthRequest) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *ValidateAuthRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type ValidateAuthResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Payer Ethereum address (0x...) 
→ maps to x-user-id / x-organization-id + OrganizationId string `protobuf:"bytes,1,opt,name=organization_id,json=organizationId,proto3" json:"organization_id,omitempty"` + // Optional identifier for sub-key or signer; empty for now + ApiKeyId string `protobuf:"bytes,2,opt,name=api_key_id,json=apiKeyId,proto3" json:"api_key_id,omitempty"` + // Additional context to pass through as trusted headers + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateAuthResponse) Reset() { + *x = ValidateAuthResponse{} + mi := &file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateAuthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateAuthResponse) ProtoMessage() {} + +func (x *ValidateAuthResponse) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateAuthResponse.ProtoReflect.Descriptor instead. +func (*ValidateAuthResponse) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescGZIP(), []int{1} +} + +func (x *ValidateAuthResponse) GetOrganizationId() string { + if x != nil { + return x.OrganizationId + } + return "" +} + +func (x *ValidateAuthResponse) GetApiKeyId() string { + if x != nil { + return x.ApiKeyId + } + return "" +} + +func (x *ValidateAuthResponse) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +var File_graph_substreams_data_service_sds_auth_v1_auth_proto protoreflect.FileDescriptor + +const file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDesc = "" + + "\n" + + "4graph/substreams/data_service/sds/auth/v1/auth.proto\x12)graph.substreams.data_service.sds.auth.v1\x1a3graph/substreams/data_service/common/v1/types.proto\"\x9d\x01\n" + + "\x13ValidateAuthRequest\x12S\n" + + "\vpayment_rav\x18\x01 \x01(\v22.graph.substreams.data_service.common.v1.SignedRAVR\n" + + "paymentRav\x12\x1d\n" + + "\n" + + "ip_address\x18\x02 \x01(\tR\tipAddress\x12\x12\n" + + "\x04path\x18\x03 \x01(\tR\x04path\"\x85\x02\n" + + "\x14ValidateAuthResponse\x12'\n" + + "\x0forganization_id\x18\x01 \x01(\tR\x0eorganizationId\x12\x1c\n" + + "\n" + + "api_key_id\x18\x02 \x01(\tR\bapiKeyId\x12i\n" + + "\bmetadata\x18\x03 \x03(\v2M.graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse.MetadataEntryR\bmetadata\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x012\x9f\x01\n" + + "\vAuthService\x12\x8f\x01\n" + + "\fValidateAuth\x12>.graph.substreams.data_service.sds.auth.v1.ValidateAuthRequest\x1a?.graph.substreams.data_service.sds.auth.v1.ValidateAuthResponseB\xe7\x02\n" + + 
"-com.graph.substreams.data_service.sds.auth.v1B\tAuthProtoP\x01Zdgithub.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1;authv1\xa2\x02\x05GSDSA\xaa\x02(Graph.Substreams.DataService.Sds.Auth.V1\xca\x02(Graph\\Substreams\\DataService\\Sds\\Auth\\V1\xe2\x024Graph\\Substreams\\DataService\\Sds\\Auth\\V1\\GPBMetadata\xea\x02-Graph::Substreams::DataService::Sds::Auth::V1b\x06proto3" + +var ( + file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescOnce sync.Once + file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescData []byte +) + +func file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescGZIP() []byte { + file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescOnce.Do(func() { + file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDesc), len(file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDesc))) + }) + return file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDescData +} + +var file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_graph_substreams_data_service_sds_auth_v1_auth_proto_goTypes = []any{ + (*ValidateAuthRequest)(nil), // 0: graph.substreams.data_service.sds.auth.v1.ValidateAuthRequest + (*ValidateAuthResponse)(nil), // 1: graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse + nil, // 2: graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse.MetadataEntry + (*v1.SignedRAV)(nil), // 3: graph.substreams.data_service.common.v1.SignedRAV +} +var file_graph_substreams_data_service_sds_auth_v1_auth_proto_depIdxs = []int32{ + 3, // 0: graph.substreams.data_service.sds.auth.v1.ValidateAuthRequest.payment_rav:type_name -> graph.substreams.data_service.common.v1.SignedRAV + 2, // 1: graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse.metadata:type_name -> graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse.MetadataEntry + 0, // 2: graph.substreams.data_service.sds.auth.v1.AuthService.ValidateAuth:input_type -> graph.substreams.data_service.sds.auth.v1.ValidateAuthRequest + 1, // 3: graph.substreams.data_service.sds.auth.v1.AuthService.ValidateAuth:output_type -> graph.substreams.data_service.sds.auth.v1.ValidateAuthResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_graph_substreams_data_service_sds_auth_v1_auth_proto_init() } +func file_graph_substreams_data_service_sds_auth_v1_auth_proto_init() { + if File_graph_substreams_data_service_sds_auth_v1_auth_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDesc), len(file_graph_substreams_data_service_sds_auth_v1_auth_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_graph_substreams_data_service_sds_auth_v1_auth_proto_goTypes, + DependencyIndexes: file_graph_substreams_data_service_sds_auth_v1_auth_proto_depIdxs, + MessageInfos: file_graph_substreams_data_service_sds_auth_v1_auth_proto_msgTypes, + 
}.Build() + File_graph_substreams_data_service_sds_auth_v1_auth_proto = out.File + file_graph_substreams_data_service_sds_auth_v1_auth_proto_goTypes = nil + file_graph_substreams_data_service_sds_auth_v1_auth_proto_depIdxs = nil +} diff --git a/pb/graph/substreams/data_service/sds/auth/v1/authv1connect/auth.connect.go b/pb/graph/substreams/data_service/sds/auth/v1/authv1connect/auth.connect.go new file mode 100644 index 0000000..f22d477 --- /dev/null +++ b/pb/graph/substreams/data_service/sds/auth/v1/authv1connect/auth.connect.go @@ -0,0 +1,116 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: graph/substreams/data_service/sds/auth/v1/auth.proto + +package authv1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // AuthServiceName is the fully-qualified name of the AuthService service. + AuthServiceName = "graph.substreams.data_service.sds.auth.v1.AuthService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // AuthServiceValidateAuthProcedure is the fully-qualified name of the AuthService's ValidateAuth + // RPC. + AuthServiceValidateAuthProcedure = "/graph.substreams.data_service.sds.auth.v1.AuthService/ValidateAuth" +) + +// AuthServiceClient is a client for the graph.substreams.data_service.sds.auth.v1.AuthService +// service. +type AuthServiceClient interface { + // ValidateAuth validates a SignedRAV and returns auth context for the caller. + // Called by the sds:// dauth plugin when a client connects with an x-sds-rav header. + ValidateAuth(context.Context, *connect.Request[v1.ValidateAuthRequest]) (*connect.Response[v1.ValidateAuthResponse], error) +} + +// NewAuthServiceClient constructs a client for the +// graph.substreams.data_service.sds.auth.v1.AuthService service. By default, it uses the Connect +// protocol with the binary Protobuf Codec, asks for gzipped responses, and sends uncompressed +// requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). 
+func NewAuthServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) AuthServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + authServiceMethods := v1.File_graph_substreams_data_service_sds_auth_v1_auth_proto.Services().ByName("AuthService").Methods() + return &authServiceClient{ + validateAuth: connect.NewClient[v1.ValidateAuthRequest, v1.ValidateAuthResponse]( + httpClient, + baseURL+AuthServiceValidateAuthProcedure, + connect.WithSchema(authServiceMethods.ByName("ValidateAuth")), + connect.WithClientOptions(opts...), + ), + } +} + +// authServiceClient implements AuthServiceClient. +type authServiceClient struct { + validateAuth *connect.Client[v1.ValidateAuthRequest, v1.ValidateAuthResponse] +} + +// ValidateAuth calls graph.substreams.data_service.sds.auth.v1.AuthService.ValidateAuth. +func (c *authServiceClient) ValidateAuth(ctx context.Context, req *connect.Request[v1.ValidateAuthRequest]) (*connect.Response[v1.ValidateAuthResponse], error) { + return c.validateAuth.CallUnary(ctx, req) +} + +// AuthServiceHandler is an implementation of the +// graph.substreams.data_service.sds.auth.v1.AuthService service. +type AuthServiceHandler interface { + // ValidateAuth validates a SignedRAV and returns auth context for the caller. + // Called by the sds:// dauth plugin when a client connects with an x-sds-rav header. + ValidateAuth(context.Context, *connect.Request[v1.ValidateAuthRequest]) (*connect.Response[v1.ValidateAuthResponse], error) +} + +// NewAuthServiceHandler builds an HTTP handler from the service implementation. It returns the path +// on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewAuthServiceHandler(svc AuthServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + authServiceMethods := v1.File_graph_substreams_data_service_sds_auth_v1_auth_proto.Services().ByName("AuthService").Methods() + authServiceValidateAuthHandler := connect.NewUnaryHandler( + AuthServiceValidateAuthProcedure, + svc.ValidateAuth, + connect.WithSchema(authServiceMethods.ByName("ValidateAuth")), + connect.WithHandlerOptions(opts...), + ) + return "/graph.substreams.data_service.sds.auth.v1.AuthService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case AuthServiceValidateAuthProcedure: + authServiceValidateAuthHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedAuthServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedAuthServiceHandler struct{} + +func (UnimplementedAuthServiceHandler) ValidateAuth(context.Context, *connect.Request[v1.ValidateAuthRequest]) (*connect.Response[v1.ValidateAuthResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("graph.substreams.data_service.sds.auth.v1.AuthService.ValidateAuth is not implemented")) +} diff --git a/pb/graph/substreams/data_service/sds/session/v1/session.pb.go b/pb/graph/substreams/data_service/sds/session/v1/session.pb.go new file mode 100644 index 0000000..741379d --- /dev/null +++ b/pb/graph/substreams/data_service/sds/session/v1/session.pb.go @@ -0,0 +1,567 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: graph/substreams/data_service/sds/session/v1/session.proto + +package sessionv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// BorrowStatus indicates the outcome of a BorrowWorker call. +type BorrowStatus int32 + +const ( + BorrowStatus_BORROW_STATUS_UNSPECIFIED BorrowStatus = 0 + // Worker slot acquired successfully + BorrowStatus_BORROW_STATUS_BORROWED BorrowStatus = 1 + // Quota exceeded; caller should return an unavailable error to the client + BorrowStatus_BORROW_STATUS_RESOURCE_EXHAUSTED BorrowStatus = 2 +) + +// Enum value maps for BorrowStatus. +var ( + BorrowStatus_name = map[int32]string{ + 0: "BORROW_STATUS_UNSPECIFIED", + 1: "BORROW_STATUS_BORROWED", + 2: "BORROW_STATUS_RESOURCE_EXHAUSTED", + } + BorrowStatus_value = map[string]int32{ + "BORROW_STATUS_UNSPECIFIED": 0, + "BORROW_STATUS_BORROWED": 1, + "BORROW_STATUS_RESOURCE_EXHAUSTED": 2, + } +) + +func (x BorrowStatus) Enum() *BorrowStatus { + p := new(BorrowStatus) + *p = x + return p +} + +func (x BorrowStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BorrowStatus) Descriptor() protoreflect.EnumDescriptor { + return file_graph_substreams_data_service_sds_session_v1_session_proto_enumTypes[0].Descriptor() +} + +func (BorrowStatus) Type() protoreflect.EnumType { + return &file_graph_substreams_data_service_sds_session_v1_session_proto_enumTypes[0] +} + +func (x BorrowStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BorrowStatus.Descriptor instead. +func (BorrowStatus) EnumDescriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{0} +} + +// BorrowWorkerRequest acquires a worker slot for a request. 
+type BorrowWorkerRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Service identifier, e.g., "substreams" + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + // Payer Ethereum address (from auth context) + OrganizationId string `protobuf:"bytes,2,opt,name=organization_id,json=organizationId,proto3" json:"organization_id,omitempty"` + // Optional sub-key or signer identifier; empty for now + ApiKeyId string `protobuf:"bytes,3,opt,name=api_key_id,json=apiKeyId,proto3" json:"api_key_id,omitempty"` + // Trace ID of the incoming request for deduplication + TraceId string `protobuf:"bytes,4,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // Maximum concurrent workers allowed for this trace ID + MaxWorkerForTraceId int64 `protobuf:"varint,5,opt,name=max_worker_for_trace_id,json=maxWorkerForTraceId,proto3" json:"max_worker_for_trace_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BorrowWorkerRequest) Reset() { + *x = BorrowWorkerRequest{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BorrowWorkerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BorrowWorkerRequest) ProtoMessage() {} + +func (x *BorrowWorkerRequest) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BorrowWorkerRequest.ProtoReflect.Descriptor instead. 
+func (*BorrowWorkerRequest) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{0} +} + +func (x *BorrowWorkerRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *BorrowWorkerRequest) GetOrganizationId() string { + if x != nil { + return x.OrganizationId + } + return "" +} + +func (x *BorrowWorkerRequest) GetApiKeyId() string { + if x != nil { + return x.ApiKeyId + } + return "" +} + +func (x *BorrowWorkerRequest) GetTraceId() string { + if x != nil { + return x.TraceId + } + return "" +} + +func (x *BorrowWorkerRequest) GetMaxWorkerForTraceId() int64 { + if x != nil { + return x.MaxWorkerForTraceId + } + return 0 +} + +type BorrowWorkerResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique key identifying this worker slot; returned in ReturnWorker + WorkerKey string `protobuf:"bytes,1,opt,name=worker_key,json=workerKey,proto3" json:"worker_key,omitempty"` + // Outcome of the borrow attempt + Status BorrowStatus `protobuf:"varint,2,opt,name=status,proto3,enum=graph.substreams.data_service.sds.session.v1.BorrowStatus" json:"status,omitempty"` + // Capacity information about the worker pool + WorkerState *WorkerState `protobuf:"bytes,3,opt,name=worker_state,json=workerState,proto3" json:"worker_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BorrowWorkerResponse) Reset() { + *x = BorrowWorkerResponse{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BorrowWorkerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BorrowWorkerResponse) ProtoMessage() {} + +func (x *BorrowWorkerResponse) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BorrowWorkerResponse.ProtoReflect.Descriptor instead. +func (*BorrowWorkerResponse) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{1} +} + +func (x *BorrowWorkerResponse) GetWorkerKey() string { + if x != nil { + return x.WorkerKey + } + return "" +} + +func (x *BorrowWorkerResponse) GetStatus() BorrowStatus { + if x != nil { + return x.Status + } + return BorrowStatus_BORROW_STATUS_UNSPECIFIED +} + +func (x *BorrowWorkerResponse) GetWorkerState() *WorkerState { + if x != nil { + return x.WorkerState + } + return nil +} + +// WorkerState describes the current capacity of the worker pool. 
+type WorkerState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Maximum number of workers available in the pool + MaxWorkers int64 `protobuf:"varint,1,opt,name=max_workers,json=maxWorkers,proto3" json:"max_workers,omitempty"` + // Number of workers currently in use + ActiveWorkers int64 `protobuf:"varint,2,opt,name=active_workers,json=activeWorkers,proto3" json:"active_workers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerState) Reset() { + *x = WorkerState{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerState) ProtoMessage() {} + +func (x *WorkerState) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerState.ProtoReflect.Descriptor instead. +func (*WorkerState) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{2} +} + +func (x *WorkerState) GetMaxWorkers() int64 { + if x != nil { + return x.MaxWorkers + } + return 0 +} + +func (x *WorkerState) GetActiveWorkers() int64 { + if x != nil { + return x.ActiveWorkers + } + return 0 +} + +// ReturnWorkerRequest releases a previously borrowed worker slot. +type ReturnWorkerRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Worker key returned from BorrowWorker + WorkerKey string `protobuf:"bytes,1,opt,name=worker_key,json=workerKey,proto3" json:"worker_key,omitempty"` + // Minimum duration the worker should have been active before being released. + // Used to enforce minimum billing windows. + MinimalWorkerLifeDuration *durationpb.Duration `protobuf:"bytes,2,opt,name=minimal_worker_life_duration,json=minimalWorkerLifeDuration,proto3" json:"minimal_worker_life_duration,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReturnWorkerRequest) Reset() { + *x = ReturnWorkerRequest{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReturnWorkerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReturnWorkerRequest) ProtoMessage() {} + +func (x *ReturnWorkerRequest) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReturnWorkerRequest.ProtoReflect.Descriptor instead. 
+func (*ReturnWorkerRequest) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{3} +} + +func (x *ReturnWorkerRequest) GetWorkerKey() string { + if x != nil { + return x.WorkerKey + } + return "" +} + +func (x *ReturnWorkerRequest) GetMinimalWorkerLifeDuration() *durationpb.Duration { + if x != nil { + return x.MinimalWorkerLifeDuration + } + return nil +} + +type ReturnWorkerResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReturnWorkerResponse) Reset() { + *x = ReturnWorkerResponse{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReturnWorkerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReturnWorkerResponse) ProtoMessage() {} + +func (x *ReturnWorkerResponse) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReturnWorkerResponse.ProtoReflect.Descriptor instead. +func (*ReturnWorkerResponse) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{4} +} + +// KeepAliveRequest refreshes the session's last-seen timestamp. +type KeepAliveRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Worker key identifying the session to keep alive + WorkerKey string `protobuf:"bytes,1,opt,name=worker_key,json=workerKey,proto3" json:"worker_key,omitempty"` + // Optional sub-key or signer identifier + ApiKeyId string `protobuf:"bytes,2,opt,name=api_key_id,json=apiKeyId,proto3" json:"api_key_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KeepAliveRequest) Reset() { + *x = KeepAliveRequest{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KeepAliveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepAliveRequest) ProtoMessage() {} + +func (x *KeepAliveRequest) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepAliveRequest.ProtoReflect.Descriptor instead. 
+func (*KeepAliveRequest) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{5} +} + +func (x *KeepAliveRequest) GetWorkerKey() string { + if x != nil { + return x.WorkerKey + } + return "" +} + +func (x *KeepAliveRequest) GetApiKeyId() string { + if x != nil { + return x.ApiKeyId + } + return "" +} + +type KeepAliveResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KeepAliveResponse) Reset() { + *x = KeepAliveResponse{} + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KeepAliveResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepAliveResponse) ProtoMessage() {} + +func (x *KeepAliveResponse) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepAliveResponse.ProtoReflect.Descriptor instead. +func (*KeepAliveResponse) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP(), []int{6} +} + +var File_graph_substreams_data_service_sds_session_v1_session_proto protoreflect.FileDescriptor + +const file_graph_substreams_data_service_sds_session_v1_session_proto_rawDesc = "" + + "\n" + + ":graph/substreams/data_service/sds/session/v1/session.proto\x12,graph.substreams.data_service.sds.session.v1\x1a\x1egoogle/protobuf/duration.proto\"\xc7\x01\n" + + "\x13BorrowWorkerRequest\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\x12'\n" + + "\x0forganization_id\x18\x02 \x01(\tR\x0eorganizationId\x12\x1c\n" + + "\n" + + "api_key_id\x18\x03 \x01(\tR\bapiKeyId\x12\x19\n" + + "\btrace_id\x18\x04 \x01(\tR\atraceId\x124\n" + + "\x17max_worker_for_trace_id\x18\x05 \x01(\x03R\x13maxWorkerForTraceId\"\xe7\x01\n" + + "\x14BorrowWorkerResponse\x12\x1d\n" + + "\n" + + "worker_key\x18\x01 \x01(\tR\tworkerKey\x12R\n" + + "\x06status\x18\x02 \x01(\x0e2:.graph.substreams.data_service.sds.session.v1.BorrowStatusR\x06status\x12\\\n" + + "\fworker_state\x18\x03 \x01(\v29.graph.substreams.data_service.sds.session.v1.WorkerStateR\vworkerState\"U\n" + + "\vWorkerState\x12\x1f\n" + + "\vmax_workers\x18\x01 \x01(\x03R\n" + + "maxWorkers\x12%\n" + + "\x0eactive_workers\x18\x02 \x01(\x03R\ractiveWorkers\"\x90\x01\n" + + "\x13ReturnWorkerRequest\x12\x1d\n" + + "\n" + + "worker_key\x18\x01 \x01(\tR\tworkerKey\x12Z\n" + + "\x1cminimal_worker_life_duration\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x19minimalWorkerLifeDuration\"\x16\n" + + "\x14ReturnWorkerResponse\"O\n" + + "\x10KeepAliveRequest\x12\x1d\n" + + "\n" + + "worker_key\x18\x01 \x01(\tR\tworkerKey\x12\x1c\n" + + "\n" + + "api_key_id\x18\x02 \x01(\tR\bapiKeyId\"\x13\n" + + "\x11KeepAliveResponse*o\n" + + "\fBorrowStatus\x12\x1d\n" + + "\x19BORROW_STATUS_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16BORROW_STATUS_BORROWED\x10\x01\x12$\n" + + " BORROW_STATUS_RESOURCE_EXHAUSTED\x10\x022\xcf\x03\n" + + "\x0eSessionService\x12\x95\x01\n" + + 
"\fBorrowWorker\x12A.graph.substreams.data_service.sds.session.v1.BorrowWorkerRequest\x1aB.graph.substreams.data_service.sds.session.v1.BorrowWorkerResponse\x12\x95\x01\n" + + "\fReturnWorker\x12A.graph.substreams.data_service.sds.session.v1.ReturnWorkerRequest\x1aB.graph.substreams.data_service.sds.session.v1.ReturnWorkerResponse\x12\x8c\x01\n" + + "\tKeepAlive\x12>.graph.substreams.data_service.sds.session.v1.KeepAliveRequest\x1a?.graph.substreams.data_service.sds.session.v1.KeepAliveResponseB\xff\x02\n" + + "0com.graph.substreams.data_service.sds.session.v1B\fSessionProtoP\x01Zjgithub.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1;sessionv1\xa2\x02\x05GSDSS\xaa\x02+Graph.Substreams.DataService.Sds.Session.V1\xca\x02+Graph\\Substreams\\DataService\\Sds\\Session\\V1\xe2\x027Graph\\Substreams\\DataService\\Sds\\Session\\V1\\GPBMetadata\xea\x020Graph::Substreams::DataService::Sds::Session::V1b\x06proto3" + +var ( + file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescOnce sync.Once + file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescData []byte +) + +func file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescGZIP() []byte { + file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescOnce.Do(func() { + file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_session_v1_session_proto_rawDesc), len(file_graph_substreams_data_service_sds_session_v1_session_proto_rawDesc))) + }) + return file_graph_substreams_data_service_sds_session_v1_session_proto_rawDescData +} + +var file_graph_substreams_data_service_sds_session_v1_session_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_graph_substreams_data_service_sds_session_v1_session_proto_goTypes = []any{ + (BorrowStatus)(0), // 0: graph.substreams.data_service.sds.session.v1.BorrowStatus + (*BorrowWorkerRequest)(nil), // 1: graph.substreams.data_service.sds.session.v1.BorrowWorkerRequest + (*BorrowWorkerResponse)(nil), // 2: graph.substreams.data_service.sds.session.v1.BorrowWorkerResponse + (*WorkerState)(nil), // 3: graph.substreams.data_service.sds.session.v1.WorkerState + (*ReturnWorkerRequest)(nil), // 4: graph.substreams.data_service.sds.session.v1.ReturnWorkerRequest + (*ReturnWorkerResponse)(nil), // 5: graph.substreams.data_service.sds.session.v1.ReturnWorkerResponse + (*KeepAliveRequest)(nil), // 6: graph.substreams.data_service.sds.session.v1.KeepAliveRequest + (*KeepAliveResponse)(nil), // 7: graph.substreams.data_service.sds.session.v1.KeepAliveResponse + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration +} +var file_graph_substreams_data_service_sds_session_v1_session_proto_depIdxs = []int32{ + 0, // 0: graph.substreams.data_service.sds.session.v1.BorrowWorkerResponse.status:type_name -> graph.substreams.data_service.sds.session.v1.BorrowStatus + 3, // 1: graph.substreams.data_service.sds.session.v1.BorrowWorkerResponse.worker_state:type_name -> graph.substreams.data_service.sds.session.v1.WorkerState + 8, // 2: graph.substreams.data_service.sds.session.v1.ReturnWorkerRequest.minimal_worker_life_duration:type_name -> google.protobuf.Duration + 1, // 3: graph.substreams.data_service.sds.session.v1.SessionService.BorrowWorker:input_type -> 
graph.substreams.data_service.sds.session.v1.BorrowWorkerRequest + 4, // 4: graph.substreams.data_service.sds.session.v1.SessionService.ReturnWorker:input_type -> graph.substreams.data_service.sds.session.v1.ReturnWorkerRequest + 6, // 5: graph.substreams.data_service.sds.session.v1.SessionService.KeepAlive:input_type -> graph.substreams.data_service.sds.session.v1.KeepAliveRequest + 2, // 6: graph.substreams.data_service.sds.session.v1.SessionService.BorrowWorker:output_type -> graph.substreams.data_service.sds.session.v1.BorrowWorkerResponse + 5, // 7: graph.substreams.data_service.sds.session.v1.SessionService.ReturnWorker:output_type -> graph.substreams.data_service.sds.session.v1.ReturnWorkerResponse + 7, // 8: graph.substreams.data_service.sds.session.v1.SessionService.KeepAlive:output_type -> graph.substreams.data_service.sds.session.v1.KeepAliveResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_graph_substreams_data_service_sds_session_v1_session_proto_init() } +func file_graph_substreams_data_service_sds_session_v1_session_proto_init() { + if File_graph_substreams_data_service_sds_session_v1_session_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_session_v1_session_proto_rawDesc), len(file_graph_substreams_data_service_sds_session_v1_session_proto_rawDesc)), + NumEnums: 1, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_graph_substreams_data_service_sds_session_v1_session_proto_goTypes, + DependencyIndexes: file_graph_substreams_data_service_sds_session_v1_session_proto_depIdxs, + EnumInfos: file_graph_substreams_data_service_sds_session_v1_session_proto_enumTypes, + MessageInfos: file_graph_substreams_data_service_sds_session_v1_session_proto_msgTypes, + }.Build() + File_graph_substreams_data_service_sds_session_v1_session_proto = out.File + file_graph_substreams_data_service_sds_session_v1_session_proto_goTypes = nil + file_graph_substreams_data_service_sds_session_v1_session_proto_depIdxs = nil +} diff --git a/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect/session.connect.go b/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect/session.connect.go new file mode 100644 index 0000000..ede093f --- /dev/null +++ b/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect/session.connect.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: graph/substreams/data_service/sds/session/v1/session.proto + +package sessionv1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. 
You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // SessionServiceName is the fully-qualified name of the SessionService service. + SessionServiceName = "graph.substreams.data_service.sds.session.v1.SessionService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // SessionServiceBorrowWorkerProcedure is the fully-qualified name of the SessionService's + // BorrowWorker RPC. + SessionServiceBorrowWorkerProcedure = "/graph.substreams.data_service.sds.session.v1.SessionService/BorrowWorker" + // SessionServiceReturnWorkerProcedure is the fully-qualified name of the SessionService's + // ReturnWorker RPC. + SessionServiceReturnWorkerProcedure = "/graph.substreams.data_service.sds.session.v1.SessionService/ReturnWorker" + // SessionServiceKeepAliveProcedure is the fully-qualified name of the SessionService's KeepAlive + // RPC. + SessionServiceKeepAliveProcedure = "/graph.substreams.data_service.sds.session.v1.SessionService/KeepAlive" +) + +// SessionServiceClient is a client for the +// graph.substreams.data_service.sds.session.v1.SessionService service. +type SessionServiceClient interface { + // BorrowWorker acquires a worker slot for a new streaming request. + // Returns resource_exhausted if quota is exceeded. + BorrowWorker(context.Context, *connect.Request[v1.BorrowWorkerRequest]) (*connect.Response[v1.BorrowWorkerResponse], error) + // ReturnWorker releases a previously borrowed worker slot. + ReturnWorker(context.Context, *connect.Request[v1.ReturnWorkerRequest]) (*connect.Response[v1.ReturnWorkerResponse], error) + // KeepAlive refreshes the session's last-seen timestamp. + KeepAlive(context.Context, *connect.Request[v1.KeepAliveRequest]) (*connect.Response[v1.KeepAliveResponse], error) +} + +// NewSessionServiceClient constructs a client for the +// graph.substreams.data_service.sds.session.v1.SessionService service. By default, it uses the +// Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends +// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). 
+func NewSessionServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) SessionServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + sessionServiceMethods := v1.File_graph_substreams_data_service_sds_session_v1_session_proto.Services().ByName("SessionService").Methods() + return &sessionServiceClient{ + borrowWorker: connect.NewClient[v1.BorrowWorkerRequest, v1.BorrowWorkerResponse]( + httpClient, + baseURL+SessionServiceBorrowWorkerProcedure, + connect.WithSchema(sessionServiceMethods.ByName("BorrowWorker")), + connect.WithClientOptions(opts...), + ), + returnWorker: connect.NewClient[v1.ReturnWorkerRequest, v1.ReturnWorkerResponse]( + httpClient, + baseURL+SessionServiceReturnWorkerProcedure, + connect.WithSchema(sessionServiceMethods.ByName("ReturnWorker")), + connect.WithClientOptions(opts...), + ), + keepAlive: connect.NewClient[v1.KeepAliveRequest, v1.KeepAliveResponse]( + httpClient, + baseURL+SessionServiceKeepAliveProcedure, + connect.WithSchema(sessionServiceMethods.ByName("KeepAlive")), + connect.WithClientOptions(opts...), + ), + } +} + +// sessionServiceClient implements SessionServiceClient. +type sessionServiceClient struct { + borrowWorker *connect.Client[v1.BorrowWorkerRequest, v1.BorrowWorkerResponse] + returnWorker *connect.Client[v1.ReturnWorkerRequest, v1.ReturnWorkerResponse] + keepAlive *connect.Client[v1.KeepAliveRequest, v1.KeepAliveResponse] +} + +// BorrowWorker calls graph.substreams.data_service.sds.session.v1.SessionService.BorrowWorker. +func (c *sessionServiceClient) BorrowWorker(ctx context.Context, req *connect.Request[v1.BorrowWorkerRequest]) (*connect.Response[v1.BorrowWorkerResponse], error) { + return c.borrowWorker.CallUnary(ctx, req) +} + +// ReturnWorker calls graph.substreams.data_service.sds.session.v1.SessionService.ReturnWorker. +func (c *sessionServiceClient) ReturnWorker(ctx context.Context, req *connect.Request[v1.ReturnWorkerRequest]) (*connect.Response[v1.ReturnWorkerResponse], error) { + return c.returnWorker.CallUnary(ctx, req) +} + +// KeepAlive calls graph.substreams.data_service.sds.session.v1.SessionService.KeepAlive. +func (c *sessionServiceClient) KeepAlive(ctx context.Context, req *connect.Request[v1.KeepAliveRequest]) (*connect.Response[v1.KeepAliveResponse], error) { + return c.keepAlive.CallUnary(ctx, req) +} + +// SessionServiceHandler is an implementation of the +// graph.substreams.data_service.sds.session.v1.SessionService service. +type SessionServiceHandler interface { + // BorrowWorker acquires a worker slot for a new streaming request. + // Returns resource_exhausted if quota is exceeded. + BorrowWorker(context.Context, *connect.Request[v1.BorrowWorkerRequest]) (*connect.Response[v1.BorrowWorkerResponse], error) + // ReturnWorker releases a previously borrowed worker slot. + ReturnWorker(context.Context, *connect.Request[v1.ReturnWorkerRequest]) (*connect.Response[v1.ReturnWorkerResponse], error) + // KeepAlive refreshes the session's last-seen timestamp. + KeepAlive(context.Context, *connect.Request[v1.KeepAliveRequest]) (*connect.Response[v1.KeepAliveResponse], error) +} + +// NewSessionServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewSessionServiceHandler(svc SessionServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + sessionServiceMethods := v1.File_graph_substreams_data_service_sds_session_v1_session_proto.Services().ByName("SessionService").Methods() + sessionServiceBorrowWorkerHandler := connect.NewUnaryHandler( + SessionServiceBorrowWorkerProcedure, + svc.BorrowWorker, + connect.WithSchema(sessionServiceMethods.ByName("BorrowWorker")), + connect.WithHandlerOptions(opts...), + ) + sessionServiceReturnWorkerHandler := connect.NewUnaryHandler( + SessionServiceReturnWorkerProcedure, + svc.ReturnWorker, + connect.WithSchema(sessionServiceMethods.ByName("ReturnWorker")), + connect.WithHandlerOptions(opts...), + ) + sessionServiceKeepAliveHandler := connect.NewUnaryHandler( + SessionServiceKeepAliveProcedure, + svc.KeepAlive, + connect.WithSchema(sessionServiceMethods.ByName("KeepAlive")), + connect.WithHandlerOptions(opts...), + ) + return "/graph.substreams.data_service.sds.session.v1.SessionService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case SessionServiceBorrowWorkerProcedure: + sessionServiceBorrowWorkerHandler.ServeHTTP(w, r) + case SessionServiceReturnWorkerProcedure: + sessionServiceReturnWorkerHandler.ServeHTTP(w, r) + case SessionServiceKeepAliveProcedure: + sessionServiceKeepAliveHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedSessionServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedSessionServiceHandler struct{} + +func (UnimplementedSessionServiceHandler) BorrowWorker(context.Context, *connect.Request[v1.BorrowWorkerRequest]) (*connect.Response[v1.BorrowWorkerResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("graph.substreams.data_service.sds.session.v1.SessionService.BorrowWorker is not implemented")) +} + +func (UnimplementedSessionServiceHandler) ReturnWorker(context.Context, *connect.Request[v1.ReturnWorkerRequest]) (*connect.Response[v1.ReturnWorkerResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("graph.substreams.data_service.sds.session.v1.SessionService.ReturnWorker is not implemented")) +} + +func (UnimplementedSessionServiceHandler) KeepAlive(context.Context, *connect.Request[v1.KeepAliveRequest]) (*connect.Response[v1.KeepAliveResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("graph.substreams.data_service.sds.session.v1.SessionService.KeepAlive is not implemented")) +} diff --git a/pb/graph/substreams/data_service/sds/usage/v1/usage.pb.go b/pb/graph/substreams/data_service/sds/usage/v1/usage.pb.go new file mode 100644 index 0000000..87de2d8 --- /dev/null +++ b/pb/graph/substreams/data_service/sds/usage/v1/usage.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: graph/substreams/data_service/sds/usage/v1/usage.proto + +package usagev1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ReportRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Batched metering events + Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportRequest) Reset() { + *x = ReportRequest{} + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportRequest) ProtoMessage() {} + +func (x *ReportRequest) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReportRequest.ProtoReflect.Descriptor instead. +func (*ReportRequest) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescGZIP(), []int{0} +} + +func (x *ReportRequest) GetEvents() []*Event { + if x != nil { + return x.Events + } + return nil +} + +type ReportResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Whether the session/key has been revoked + Revoked bool `protobuf:"varint,1,opt,name=revoked,proto3" json:"revoked,omitempty"` + // If revoked, the reason for revocation + RevocationReason string `protobuf:"bytes,2,opt,name=revocation_reason,json=revocationReason,proto3" json:"revocation_reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReportResponse) Reset() { + *x = ReportResponse{} + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReportResponse) ProtoMessage() {} + +func (x *ReportResponse) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReportResponse.ProtoReflect.Descriptor instead. +func (*ReportResponse) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescGZIP(), []int{1} +} + +func (x *ReportResponse) GetRevoked() bool { + if x != nil { + return x.Revoked + } + return false +} + +func (x *ReportResponse) GetRevocationReason() string { + if x != nil { + return x.RevocationReason + } + return "" +} + +// Event represents a single metered usage event as reported by the dmetering plugin. 
+type Event struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Payer Ethereum address (from auth context) + OrganizationId string `protobuf:"bytes,1,opt,name=organization_id,json=organizationId,proto3" json:"organization_id,omitempty"` + // Optional sub-key or signer identifier + ApiKeyId string `protobuf:"bytes,2,opt,name=api_key_id,json=apiKeyId,proto3" json:"api_key_id,omitempty"` + // Client IP address + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // Endpoint called, e.g., "sf.substreams.rpc.v2/Blocks" + Endpoint string `protobuf:"bytes,4,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Network identifier, e.g., "eth-mainnet" + Network string `protobuf:"bytes,5,opt,name=network,proto3" json:"network,omitempty"` + // Arbitrary metadata + Meta string `protobuf:"bytes,7,opt,name=meta,proto3" json:"meta,omitempty"` + // Provider Ethereum address + Provider string `protobuf:"bytes,8,opt,name=provider,proto3" json:"provider,omitempty"` + // Output module hash (for Substreams) + OutputModuleHash string `protobuf:"bytes,9,opt,name=output_module_hash,json=outputModuleHash,proto3" json:"output_module_hash,omitempty"` + // Individual metrics (blocks_count, bytes_sent, etc.) + Metrics []*Metric `protobuf:"bytes,20,rep,name=metrics,proto3" json:"metrics,omitempty"` + // Event timestamp + Timestamp *timestamppb.Timestamp `protobuf:"bytes,30,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Event) Reset() { + *x = Event{} + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescGZIP(), []int{2} +} + +func (x *Event) GetOrganizationId() string { + if x != nil { + return x.OrganizationId + } + return "" +} + +func (x *Event) GetApiKeyId() string { + if x != nil { + return x.ApiKeyId + } + return "" +} + +func (x *Event) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *Event) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *Event) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *Event) GetMeta() string { + if x != nil { + return x.Meta + } + return "" +} + +func (x *Event) GetProvider() string { + if x != nil { + return x.Provider + } + return "" +} + +func (x *Event) GetOutputModuleHash() string { + if x != nil { + return x.OutputModuleHash + } + return "" +} + +func (x *Event) GetMetrics() []*Metric { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *Event) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +// Metric is a single named counter value within a usage event. 
+type Metric struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Metric) Reset() { + *x = Metric{} + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescGZIP(), []int{3} +} + +func (x *Metric) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Metric) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_graph_substreams_data_service_sds_usage_v1_usage_proto protoreflect.FileDescriptor + +const file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDesc = "" + + "\n" + + "6graph/substreams/data_service/sds/usage/v1/usage.proto\x12*graph.substreams.data_service.sds.usage.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"Z\n" + + "\rReportRequest\x12I\n" + + "\x06events\x18\x01 \x03(\v21.graph.substreams.data_service.sds.usage.v1.EventR\x06events\"W\n" + + "\x0eReportResponse\x12\x18\n" + + "\arevoked\x18\x01 \x01(\bR\arevoked\x12+\n" + + "\x11revocation_reason\x18\x02 \x01(\tR\x10revocationReason\"\x89\x03\n" + + "\x05Event\x12'\n" + + "\x0forganization_id\x18\x01 \x01(\tR\x0eorganizationId\x12\x1c\n" + + "\n" + + "api_key_id\x18\x02 \x01(\tR\bapiKeyId\x12\x1d\n" + + "\n" + + "ip_address\x18\x03 \x01(\tR\tipAddress\x12\x1a\n" + + "\bendpoint\x18\x04 \x01(\tR\bendpoint\x12\x18\n" + + "\anetwork\x18\x05 \x01(\tR\anetwork\x12\x12\n" + + "\x04meta\x18\a \x01(\tR\x04meta\x12\x1a\n" + + "\bprovider\x18\b \x01(\tR\bprovider\x12,\n" + + "\x12output_module_hash\x18\t \x01(\tR\x10outputModuleHash\x12L\n" + + "\ametrics\x18\x14 \x03(\v22.graph.substreams.data_service.sds.usage.v1.MetricR\ametrics\x128\n" + + "\ttimestamp\x18\x1e \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\"2\n" + + "\x06Metric\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value2\x8f\x01\n" + + "\fUsageService\x12\x7f\n" + + "\x06Report\x129.graph.substreams.data_service.sds.usage.v1.ReportRequest\x1a:.graph.substreams.data_service.sds.usage.v1.ReportResponseB\xef\x02\n" + + ".com.graph.substreams.data_service.sds.usage.v1B\n" + + "UsageProtoP\x01Zfgithub.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1;usagev1\xa2\x02\x05GSDSU\xaa\x02)Graph.Substreams.DataService.Sds.Usage.V1\xca\x02)Graph\\Substreams\\DataService\\Sds\\Usage\\V1\xe2\x025Graph\\Substreams\\DataService\\Sds\\Usage\\V1\\GPBMetadata\xea\x02.Graph::Substreams::DataService::Sds::Usage::V1b\x06proto3" + +var ( + file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescOnce sync.Once + 
file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescData []byte +) + +func file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescGZIP() []byte { + file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescOnce.Do(func() { + file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDesc), len(file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDesc))) + }) + return file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDescData +} + +var file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_graph_substreams_data_service_sds_usage_v1_usage_proto_goTypes = []any{ + (*ReportRequest)(nil), // 0: graph.substreams.data_service.sds.usage.v1.ReportRequest + (*ReportResponse)(nil), // 1: graph.substreams.data_service.sds.usage.v1.ReportResponse + (*Event)(nil), // 2: graph.substreams.data_service.sds.usage.v1.Event + (*Metric)(nil), // 3: graph.substreams.data_service.sds.usage.v1.Metric + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp +} +var file_graph_substreams_data_service_sds_usage_v1_usage_proto_depIdxs = []int32{ + 2, // 0: graph.substreams.data_service.sds.usage.v1.ReportRequest.events:type_name -> graph.substreams.data_service.sds.usage.v1.Event + 3, // 1: graph.substreams.data_service.sds.usage.v1.Event.metrics:type_name -> graph.substreams.data_service.sds.usage.v1.Metric + 4, // 2: graph.substreams.data_service.sds.usage.v1.Event.timestamp:type_name -> google.protobuf.Timestamp + 0, // 3: graph.substreams.data_service.sds.usage.v1.UsageService.Report:input_type -> graph.substreams.data_service.sds.usage.v1.ReportRequest + 1, // 4: graph.substreams.data_service.sds.usage.v1.UsageService.Report:output_type -> graph.substreams.data_service.sds.usage.v1.ReportResponse + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_graph_substreams_data_service_sds_usage_v1_usage_proto_init() } +func file_graph_substreams_data_service_sds_usage_v1_usage_proto_init() { + if File_graph_substreams_data_service_sds_usage_v1_usage_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDesc), len(file_graph_substreams_data_service_sds_usage_v1_usage_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_graph_substreams_data_service_sds_usage_v1_usage_proto_goTypes, + DependencyIndexes: file_graph_substreams_data_service_sds_usage_v1_usage_proto_depIdxs, + MessageInfos: file_graph_substreams_data_service_sds_usage_v1_usage_proto_msgTypes, + }.Build() + File_graph_substreams_data_service_sds_usage_v1_usage_proto = out.File + file_graph_substreams_data_service_sds_usage_v1_usage_proto_goTypes = nil + file_graph_substreams_data_service_sds_usage_v1_usage_proto_depIdxs = nil +} diff --git a/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect/usage.connect.go b/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect/usage.connect.go new file mode 
100644 index 0000000..cf8e3d0 --- /dev/null +++ b/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect/usage.connect.go @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: graph/substreams/data_service/sds/usage/v1/usage.proto + +package usagev1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // UsageServiceName is the fully-qualified name of the UsageService service. + UsageServiceName = "graph.substreams.data_service.sds.usage.v1.UsageService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // UsageServiceReportProcedure is the fully-qualified name of the UsageService's Report RPC. + UsageServiceReportProcedure = "/graph.substreams.data_service.sds.usage.v1.UsageService/Report" +) + +// UsageServiceClient is a client for the graph.substreams.data_service.sds.usage.v1.UsageService +// service. +type UsageServiceClient interface { + // Report receives a batch of metering events from the firehose-core dmetering plugin. + Report(context.Context, *connect.Request[v1.ReportRequest]) (*connect.Response[v1.ReportResponse], error) +} + +// NewUsageServiceClient constructs a client for the +// graph.substreams.data_service.sds.usage.v1.UsageService service. By default, it uses the Connect +// protocol with the binary Protobuf Codec, asks for gzipped responses, and sends uncompressed +// requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewUsageServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) UsageServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + usageServiceMethods := v1.File_graph_substreams_data_service_sds_usage_v1_usage_proto.Services().ByName("UsageService").Methods() + return &usageServiceClient{ + report: connect.NewClient[v1.ReportRequest, v1.ReportResponse]( + httpClient, + baseURL+UsageServiceReportProcedure, + connect.WithSchema(usageServiceMethods.ByName("Report")), + connect.WithClientOptions(opts...), + ), + } +} + +// usageServiceClient implements UsageServiceClient. +type usageServiceClient struct { + report *connect.Client[v1.ReportRequest, v1.ReportResponse] +} + +// Report calls graph.substreams.data_service.sds.usage.v1.UsageService.Report. 
+func (c *usageServiceClient) Report(ctx context.Context, req *connect.Request[v1.ReportRequest]) (*connect.Response[v1.ReportResponse], error) { + return c.report.CallUnary(ctx, req) +} + +// UsageServiceHandler is an implementation of the +// graph.substreams.data_service.sds.usage.v1.UsageService service. +type UsageServiceHandler interface { + // Report receives a batch of metering events from the firehose-core dmetering plugin. + Report(context.Context, *connect.Request[v1.ReportRequest]) (*connect.Response[v1.ReportResponse], error) +} + +// NewUsageServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewUsageServiceHandler(svc UsageServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + usageServiceMethods := v1.File_graph_substreams_data_service_sds_usage_v1_usage_proto.Services().ByName("UsageService").Methods() + usageServiceReportHandler := connect.NewUnaryHandler( + UsageServiceReportProcedure, + svc.Report, + connect.WithSchema(usageServiceMethods.ByName("Report")), + connect.WithHandlerOptions(opts...), + ) + return "/graph.substreams.data_service.sds.usage.v1.UsageService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case UsageServiceReportProcedure: + usageServiceReportHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedUsageServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedUsageServiceHandler struct{} + +func (UnimplementedUsageServiceHandler) Report(context.Context, *connect.Request[v1.ReportRequest]) (*connect.Response[v1.ReportResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("graph.substreams.data_service.sds.usage.v1.UsageService.Report is not implemented")) +} diff --git a/plans/provider-firecore-plugins.md b/plans/provider-firecore-plugins.md new file mode 100644 index 0000000..6aa25e5 --- /dev/null +++ b/plans/provider-firecore-plugins.md @@ -0,0 +1,737 @@ +# Implementation Plan: provider-firecore-plugins + +## ULTIMATE GOAL + +Implement the **server-side endpoints** that firehose-core's auth, metering, and session plugins will call when configured with `tgm://localhost:` (and later `sds://`). + +**Key Clarification**: We are NOT implementing firehose-core plugins. The plugins already exist in firehose-core. We are implementing the **gRPC/HTTP services** that those plugins call as clients. + +The services will: +- Be served by the SDS provider sidecar +- Translate plugin calls to internal SDS session/payment validation logic +- Enable firehose-core tier1 to use `tgm://localhost:` pointing to local SDS provider sidecar + +## Status: IMPLEMENTED (commit d313c011) + +All Priority 0-5 items are fully implemented and tested. See commit `d313c011`. 
+ +### Implemented: +- `provider/repository`: GlobalRepository interface + InMemoryRepository (haxmap-backed) +- `provider/auth`: AuthService gRPC (EIP-712 RAV validation, signer caching) +- `provider/usage`: UsageService gRPC (dmetering batched events) +- `provider/session`: SessionService gRPC (BorrowWorker/ReturnWorker/KeepAlive + quotas) +- `provider/sidecar`: All three services wired into ConnectWeb server +- Proto definitions: sds/auth/v1, sds/usage/v1, sds/session/v1 + +### Known Issue: Git Repository Corruption +The git repo has a corrupt pack file (`df33225d...`). The parent commit of the "Plan for plugin +first implementation" commit is unreadable. A graft file was added at `.git/info/grafts` to allow +git operations to work. The new implementation commit `d313c011` was created via `git commit-tree` +to bypass the corrupt ancestry. + +--- + +## Architecture Decision: RESOLVED ✓ + +### Full Flow (from docs/flowchart.txt) + +``` +┌─────────────┐ ┌──────────────┐ ┌──────────────────┐ ┌──────────────┐ +│ Consumer │ │ Consumer │ │ Provider │ │ Provider │ +│ (substreams)│ │ Sidecar │ │ Sidecar │ │ (tier1) │ +└──────┬──────┘ └──────┬───────┘ └────────┬─────────┘ └──────┬───────┘ + │ │ │ │ + │ 1. init() │ │ │ + │──────────────────>│ │ │ + │ │ 2. startSession │ │ + │ │ (escrow, RAV0) │ │ + │ │─────────────────────>│ │ + │ │ │ │ + │ │ 3. useThis(RAVx) │ │ + │ │<─────────────────────│ │ + │ 4. RAVx │ │ │ + │<──────────────────│ │ │ + │ │ │ │ + │ 5. Blocks() with header x-sds-rav=RAVx │ │ + │─────────────────────────────────────────────────────────────────>│ + │ │ │ 6. validate RAVx │ + │ │ │<─────────────────────│ + │ │ │ 7. OK (payer, etc.) │ + │ │ │─────────────────────>│ + │ 8. data... │ │ │ + │<─────────────────────────────────────────────────────────────────│ +``` + +### Key Insight + +The consumer sends **RAV in a header** (`x-sds-rav=RAVx`) directly to tier1. The `sds://` dauth plugin: +1. Extracts RAV from `x-sds-rav` header +2. Calls provider sidecar to validate RAV +3. Receives back auth context (payer address, session info) +4. Populates trusted headers (`x-user-id`, etc.) + +### Architecture: Option A - New `sds://` dauth plugin ✓ + +**Note**: `sds://` will be an alias for `tgm://` - both call external gRPC services. The plugin pattern is the same, just different validation logic on the server side. 
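+The bullet lists below spell out each side's responsibilities. As a concrete
+illustration first, here is a minimal sketch of the plugin-side round-trip. The
+`authv1`/`authv1connect` packages and their import paths are assumed to be
+generated from `sds/auth/v1` in the same style as the session and usage
+packages in this change, and the helper name `validateRAVHeader` plus the
+`x-api-key-id` header name are illustrative only:
+
+```go
+import (
+    "context"
+    "encoding/base64"
+    "errors"
+    "fmt"
+    "net/http"
+
+    "connectrpc.com/connect"
+    "google.golang.org/protobuf/proto"
+
+    commonv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/common/v1"
+    authv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1"
+    "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1/authv1connect"
+)
+
+// validateRAVHeader sketches how an sds:// dauth plugin could delegate
+// validation of the x-sds-rav header to the provider sidecar.
+func validateRAVHeader(ctx context.Context, client authv1connect.AuthServiceClient, headers http.Header, ip, path string) (http.Header, error) {
+    raw := headers.Get("x-sds-rav")
+    if raw == "" {
+        return nil, errors.New("missing x-sds-rav header")
+    }
+
+    // Machine-to-machine callers send raw SignedRAV bytes; user-provided
+    // values are base64-encoded, so try base64 first and fall back to raw.
+    ravBytes, err := base64.StdEncoding.DecodeString(raw)
+    if err != nil {
+        ravBytes = []byte(raw)
+    }
+
+    signedRAV := &commonv1.SignedRAV{}
+    if err := proto.Unmarshal(ravBytes, signedRAV); err != nil {
+        return nil, fmt.Errorf("decoding signed RAV: %w", err)
+    }
+
+    resp, err := client.ValidateAuth(ctx, connect.NewRequest(&authv1.ValidateAuthRequest{
+        PaymentRav: signedRAV,
+        IpAddress:  ip,
+        Path:       path,
+    }))
+    if err != nil {
+        return nil, err // CodeUnauthenticated / CodePermissionDenied bubble up
+    }
+
+    // Populate trusted headers from the sidecar's auth context.
+    trusted := http.Header{}
+    trusted.Set("x-user-id", resp.Msg.OrganizationId)
+    if resp.Msg.ApiKeyId != "" {
+        trusted.Set("x-api-key-id", resp.Msg.ApiKeyId) // illustrative header name
+    }
+    return trusted, nil
+}
+```
+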
+ +**Plugin in firehose-core** (sds:// or tgm://): +- Extracts `x-sds-rav` header containing SignedRAV +- Calls gRPC `ValidateAuth(SignedRAV)` on configured endpoint +- Receives `AuthResponse{organization_id, api_key_id, ...}` +- Populates trusted headers + +**Provider sidecar implements**: +- gRPC `AuthService.ValidateAuth(SignedRAV) → AuthResponse` +- Validates EIP-712 signature, recovers signer +- Checks authorization (self-sign or on-chain delegation) +- Returns payer address as `organization_id` + +--- + +## Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ firehose-core tier1 (Provider) │ +│ │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ dauth plugin │ │dmetering │ │ dsession │ │ +│ │ (sds://) │ │plugin(sds://)│ │ plugin(sds://)│ │ +│ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ │ +└─────────┼─────────────────┼─────────────────┼───────────────────────┘ + │ gRPC │ gRPC │ gRPC + │ ValidateAuth │ Report │ BorrowWorker + │ (x-sds-rav) │ (usage events) │ ReturnWorker + ▼ ▼ ▼ +┌─────────────────────────────────────────────────────────────────────┐ +│ SDS Provider Sidecar (what we implement) │ +│ │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ AuthService │ │UsageService │ │Session │ │ +│ │ (gRPC) │ │(gRPC) │ │Service(gRPC) │ │ +│ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │ +│ │ │ │ │ +│ └─────────────────┴─────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌─────────────────────────┐ │ +│ │ Internal SDS Logic │ │ +│ │ - RAV signature verify │ │ +│ │ - Session tracking │ │ +│ │ - Quota enforcement │ │ +│ │ - On-chain auth check │ │ +│ └─────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Global State Architecture + +### Overview + +A `GlobalRepository` interface provides the abstraction for live state management. This enables: +- Single in-memory deployment initially +- High-availability via Redis later (just swap implementation) + +### GlobalRepository Interface + +```go +// GlobalRepository provides global state storage for live session/client tracking. +// All methods are namespaced by domain (Session*, Client*, Quota*, etc.) 
+type GlobalRepository interface { + // Session management + SessionCreate(ctx context.Context, session *Session) error + SessionGet(ctx context.Context, sessionID string) (*Session, error) + SessionUpdate(ctx context.Context, session *Session) error + SessionDelete(ctx context.Context, sessionID string) error + SessionList(ctx context.Context, filter SessionFilter) ([]*Session, error) + SessionGetByPayer(ctx context.Context, payer string) ([]*Session, error) + + // Worker/connection tracking within sessions + WorkerCreate(ctx context.Context, worker *Worker) error + WorkerGet(ctx context.Context, workerKey string) (*Worker, error) + WorkerDelete(ctx context.Context, workerKey string) error + WorkerListBySession(ctx context.Context, sessionID string) ([]*Worker, error) + WorkerCountByPayer(ctx context.Context, payer string) (int, error) + + // Quota tracking + QuotaGet(ctx context.Context, payer string) (*QuotaUsage, error) + QuotaIncrement(ctx context.Context, payer string, sessions int, workers int) error + QuotaDecrement(ctx context.Context, payer string, sessions int, workers int) error + + // Usage accumulation (for metering) + UsageAdd(ctx context.Context, sessionID string, usage *UsageEvent) error + UsageGetTotal(ctx context.Context, sessionID string) (*UsageSummary, error) + + // Health/lifecycle + Ping(ctx context.Context) error + Close() error +} +``` + +### Domain Types + +```go +type Session struct { + ID string + PayerAddress string + SignerAddress string + ServiceProvider string + CreatedAt time.Time + LastKeepAlive time.Time + Status SessionStatus // active, terminated + Metadata map[string]string +} + +type Worker struct { + Key string + SessionID string + PayerAddress string + CreatedAt time.Time + TraceID string +} + +type QuotaUsage struct { + PayerAddress string + ActiveSessions int + ActiveWorkers int + LastUpdated time.Time +} + +type UsageEvent struct { + Timestamp time.Time + Blocks int64 + Bytes int64 + Requests int64 +} + +type UsageSummary struct { + TotalBlocks int64 + TotalBytes int64 + TotalRequests int64 +} + +type SessionFilter struct { + PayerAddress *string + Status *SessionStatus + CreatedAfter *time.Time +} + +type SessionStatus string + +const ( + SessionStatusActive SessionStatus = "active" + SessionStatusTerminated SessionStatus = "terminated" +) +``` + +### ConcurrentMap Type Alias + +Use https://github.com/alphadose/haxmap for lock-free concurrent map operations: + +```go +// ConcurrentMap is a type alias for high-performance concurrent hashmap +type ConcurrentMap[K comparable, V any] = *haxmap.Map[K, V] + +func NewConcurrentMap[K comparable, V any]() ConcurrentMap[K, V] { + return haxmap.New[K, V]() +} +``` + +### Implementations + +**Priority 1: InMemory (bootstrap)** +```go +type InMemoryRepository struct { + sessions ConcurrentMap[string, *Session] + workers ConcurrentMap[string, *Worker] + quotas ConcurrentMap[string, *QuotaUsage] + usage ConcurrentMap[string, []*UsageEvent] // may need sync for slice append +} + +func NewInMemoryRepository() *InMemoryRepository { + return &InMemoryRepository{ + sessions: NewConcurrentMap[string, *Session](), + workers: NewConcurrentMap[string, *Worker](), + quotas: NewConcurrentMap[string, *QuotaUsage](), + usage: NewConcurrentMap[string, []*UsageEvent](), + } +} +``` + +**Note:** For operations requiring atomic read-modify-write on slices (like `UsageAdd`), we may need a thin mutex wrapper or use haxmap's `GetOrCompute` pattern. 
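+One way to make that concrete is to store a small mutex-guarded bucket as the
+map value and let `GetOrCompute` create it atomically on first access. A
+minimal sketch, assuming haxmap's `GetOrCompute(key, func() V)` signature and a
+`usage` field re-typed to `ConcurrentMap[string, *usageBucket]`:
+
+```go
+// usageBucket guards the per-session event slice: haxmap keeps the map
+// operations lock-free, but appending to a slice still needs a mutex.
+type usageBucket struct {
+    mu     sync.Mutex
+    events []*UsageEvent
+}
+
+// UsageAdd appends a usage event for a session.
+func (r *InMemoryRepository) UsageAdd(ctx context.Context, sessionID string, usage *UsageEvent) error {
+    bucket, _ := r.usage.GetOrCompute(sessionID, func() *usageBucket {
+        return &usageBucket{}
+    })
+    bucket.mu.Lock()
+    bucket.events = append(bucket.events, usage)
+    bucket.mu.Unlock()
+    return nil
+}
+```
+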
+
+**Future: Redis (high-availability)**
+```go
+type RedisRepository struct {
+	client *redis.Client
+	// Key patterns:
+	//   session:{id} -> Session JSON
+	//   sessions:payer:{address} -> Set of session IDs
+	//   worker:{key} -> Worker JSON
+	//   workers:session:{id} -> Set of worker keys
+	//   quota:{payer} -> QuotaUsage JSON
+	//   usage:{sessionID} -> List of UsageEvent JSON
+}
+
+func NewRedisRepository(client *redis.Client) *RedisRepository
+```
+
+### File Structure
+
+```
+provider/
+  repository/
+    repository.go      # GlobalRepository interface + domain types
+    inmemory.go        # InMemoryRepository implementation
+    inmemory_test.go
+    # Future:
+    # redis.go         # RedisRepository implementation
+    # redis_test.go
+```
+
+---
+
+## Services to Implement
+
+### 1. Auth Service (gRPC) ✓ ARCHITECTURE DECIDED
+
+The `sds://` dauth plugin will call this gRPC service to validate the `x-sds-rav` header.
+
+**Header Format:**
+- Header name: `x-sds-rav`
+- Content: Raw bytes (SignedRAV proto) for machine-to-machine; base64 if user-provided
+
+**Proto Service:**
+```proto
+service AuthService {
+  rpc ValidateAuth(ValidateAuthRequest) returns (ValidateAuthResponse);
+}
+
+message ValidateAuthRequest {
+  // SignedRAV extracted from `x-sds-rav` header
+  common.v1.SignedRAV payment_rav = 1;
+  // Client IP address
+  string ip_address = 2;
+  // Request path/endpoint
+  string path = 3;
+}
+
+message ValidateAuthResponse {
+  // Payer address (0x...) → x-user-id / x-organization-id
+  string organization_id = 1;
+  // Optional for now, may be session ID or signer
+  string api_key_id = 2;
+  // Any additional context to pass through
+  map<string, string> metadata = 3;
+}
+```
+
+**SDS Implementation:**
+```go
+func (s *AuthService) ValidateAuth(ctx context.Context, req *ValidateAuthRequest) (*ValidateAuthResponse, error) {
+	// 1. Convert proto to horizon.SignedRAV
+	signedRAV := ProtoSignedRAVToHorizon(req.PaymentRav)
+
+	// 2. Recover signer from EIP-712 signature
+	signer, err := signedRAV.RecoverSigner(s.domain)
+	if err != nil {
+		return nil, connect.NewError(connect.CodeUnauthenticated, fmt.Errorf("invalid signature: %w", err))
+	}
+
+	// 3. Check authorization (signer == payer, or on-chain delegation)
+	payer := signedRAV.Message.Payer
+	authorized, err := s.isSignerAuthorized(ctx, payer, signer)
+	if err != nil {
+		return nil, connect.NewError(connect.CodeInternal, err)
+	}
+	if !authorized {
+		return nil, connect.NewError(connect.CodePermissionDenied, fmt.Errorf("signer not authorized for payer"))
+	}
+
+	// 4. Return auth context
+	return &ValidateAuthResponse{
+		OrganizationId: payer.Hex(),
+		ApiKeyId:       "", // empty for now
+		Metadata: map[string]string{
+			"signer": signer.Hex(),
+		},
+	}, nil
+}
+```
+
+**Note:** This endpoint is similar to the existing `ValidatePayment` handler in the provider sidecar; we may be able to reuse or adapt that logic.
+
+---
+
+### 2. Usage Service (gRPC)
+
+The dmetering `tgm://` plugin calls this gRPC service to report usage events.
+ +**Proto Service:** +```proto +service UsageService { + rpc Report(ReportRequest) returns (ReportResponse); +} + +message ReportRequest { + repeated sf.metering.v1.Event events = 1; +} + +message ReportResponse { + bool revoked = 1; + string revocation_reason = 2; +} +``` + +**Event Structure (from dmetering):** +```proto +message Event { + string organization_id = 1; // Payer address + string api_key_id = 2; // Session/signer identifier + string ip_address = 3; + string endpoint = 4; // e.g., "sf.substreams.rpc.v2/Blocks" + string network = 5; // e.g., "eth-mainnet" + string meta = 7; + string provider = 8; // Our provider address + string output_module_hash = 9; + repeated Metric metrics = 20; // blocks_count, bytes, etc. + google.protobuf.Timestamp timestamp = 30; +} +``` + +**SDS Implementation (uses GlobalRepository):** +- Receive batched events from metering plugin +- Store via `repo.UsageAdd()` for each event +- Check session status via `repo.SessionGet()` - return `revoked=true` if terminated +- Batching: Plugin batches with configurable delay (default 100ms) + +--- + +### 3. Session Service (gRPC) + +The dsession `tgm://` plugin calls this gRPC service for session/quota management. + +**Proto Service:** +```proto +service SessionService { + rpc BorrowWorker(BorrowWorkerRequest) returns (BorrowWorkerResponse); + rpc ReturnWorker(ReturnWorkerRequest) returns (ReturnWorkerResponse); + rpc KeepAlive(KeepAliveRequest) returns (KeepAliveResponse); +} +``` + +**BorrowWorker:** +```proto +message BorrowWorkerRequest { + string service = 1; // "substreams" + string organization_id = 2; // Payer address (from auth context) + string api_key_id = 3; // Optional, empty for now + string trace_id = 4; // Request trace ID + int64 max_worker_for_trace_id = 5; +} + +message BorrowWorkerResponse { + string worker_key = 1; // Session identifier + Status status = 2; // borrowed, resource_exhausted + WorkerState worker_state = 3; // MaxWorkers capacity info +} +``` + +**ReturnWorker:** +```proto +message ReturnWorkerRequest { + string worker_key = 1; + google.protobuf.Duration minimal_worker_life_duration = 2; +} +``` + +**KeepAlive:** +```proto +message KeepAliveRequest { + string worker_key = 1; + string api_key_id = 2; +} +``` + +**SDS Implementation (uses GlobalRepository):** +- BorrowWorker: `repo.SessionCreate()`, `repo.WorkerCreate()`, `repo.QuotaIncrement()`, enforce quota limits +- ReturnWorker: `repo.WorkerDelete()`, `repo.QuotaDecrement()`, report final usage +- KeepAlive: `repo.SessionUpdate()` to update `LastKeepAlive` timestamp + +**Quota Configuration (in pricing config):** +```yaml +quotas: + defaults: + max_concurrent_sessions: 10 + max_workers_per_session: 5 + overrides: + # Per-payer overrides + "0x1234abcd...": # payer address + max_concurrent_sessions: 50 + max_workers_per_session: 20 +``` + +**BorrowWorker Response Codes:** +- `borrowed`: Success, session/worker acquired +- `resource_exhausted`: Quota exceeded for this payer + +--- + +## Implementation Tasks + +### Priority 0: Global Repository + +#### 0.1 Define GlobalRepository interface +- [x] Create `provider/repository/` package +- [x] Define `GlobalRepository` interface with all methods +- [x] Define domain types (`Session`, `Worker`, `QuotaUsage`, `UsageEvent`, etc.) 
+- [x] All methods take `ctx context.Context` and return `error` + +#### 0.2 Implement InMemoryRepository +- [x] Add `github.com/alphadose/haxmap` dependency +- [x] Create `ConcurrentMap[K, V]` type alias +- [x] Implement all `GlobalRepository` methods using haxmap +- [x] Handle atomic slice operations (e.g., `UsageAdd`) with appropriate pattern +- [x] Write comprehensive tests + +**Files to create:** +- `provider/repository/repository.go` - Interface + types +- `provider/repository/inmemory.go` - InMemory implementation +- `provider/repository/inmemory_test.go` + +--- + +### Priority 1: Proto Definitions + +#### 1.1 Add/Import proto definitions +- [x] Import or define `sf.gateway.payment.v1.UsageService` proto +- [x] Import or define `sf.sds.session.v1.SessionService` proto +- [x] Import `sf.metering.v1.Event` proto from dmetering +- [x] Generate Go code with buf/protoc + +**Files:** +- `proto/sf/gateway/payment/v1/usage.proto` +- `proto/sf/sds/session/v1/session.proto` +- Or import from existing packages + +**Reference:** +- dsession: https://github.com/streamingfast/dsession +- Check if worker-pool-protocol has published protos + +--- + +### Priority 2: Auth Service (gRPC) ✓ UNBLOCKED + +#### 2.1 Define proto for AuthService +- [x] Create `proto/sf/sds/auth/v1/auth.proto` with `AuthService.ValidateAuth` +- [x] Generate Go code with buf/protoc +- [x] Or reuse/extend existing `ValidatePayment` proto + +#### 2.2 Implement AuthService gRPC +- [x] Create `provider/auth/` package +- [x] Implement `ValidateAuth` RPC handler +- [x] Reuse existing RAV validation logic from `handler_validate_payment.go` +- [x] Wire into provider sidecar gRPC server + +**Files to create:** +- `provider/auth/service.go` - gRPC handler +- `provider/auth/service_test.go` + +**Note:** Much of the logic already exists in `provider/sidecar/handler_validate_payment.go`. We may: +1. Create a new service that wraps existing logic +2. Or extend existing `ProviderSidecarService` with the auth endpoint +3. 
Or create an adapter that maps the new proto to existing calls + +--- + +### Priority 3: Usage Service (gRPC) + +#### 3.1 Implement UsageService gRPC +- [x] Create `provider/usage/` package +- [x] Implement `Report` RPC handler +- [x] Map `sf.metering.v1.Event` to internal usage tracking +- [x] Integrate with session state to check revocation +- [x] Wire into provider sidecar gRPC server + +**Files to create:** +- `provider/usage/service.go` - gRPC handler +- `provider/usage/mapper.go` - Event to internal usage mapping +- `provider/usage/service_test.go` + +--- + +### Priority 4: Session Service (gRPC) + +#### 4.1 Implement SessionService gRPC +- [x] Create `provider/session/` package +- [x] Implement `BorrowWorker` RPC handler +- [x] Implement `ReturnWorker` RPC handler +- [x] Implement `KeepAlive` RPC handler +- [x] Implement quota enforcement from pricing config +- [x] Support per-payer quota overrides +- [x] Integrate with internal session tracking +- [x] Wire into provider sidecar gRPC server + +**Files to create:** +- `provider/session/service.go` - gRPC handlers (uses GlobalRepository) +- `provider/session/quotas.go` - Quota config loading from provider-config +- `provider/session/service_test.go` + +--- + +### Priority 5: Integration + +#### 5.1 Wire services into provider sidecar +- [x] Add gRPC service registration for AuthService +- [x] Add gRPC service registration for UsageService +- [x] Add gRPC service registration for SessionService +- [x] Add configuration for enabling/disabling each service +- [x] Add logging using `logging.PackageLogger` pattern + +#### 5.2 Integration testing +- [x] Test auth flow with actual dauth plugin config +- [x] Test metering flow with dmetering plugin +- [x] Test session flow with dsession plugin +- [x] End-to-end test with firehose-core tier1 + +--- + +## Configuration + +The provider sidecar will serve these endpoints on its existing port(s): + +```bash +# firehose-core tier1 configuration +--common-auth-plugin="tgm://localhost:9001" +--common-metering-plugin="tgm://localhost:9001?network=eth-mainnet" +--common-session-plugin="tgm://localhost:9001" +``` + +Provider sidecar flags (to add - depends on architecture): +```bash +# RAV validation (always needed) +--eip712-domain-chain-id=1 +--eip712-domain-verifying-contract=0x... + +# Quota configuration +--quotas-config=/path/to/quotas.yaml +``` + +--- + +## File Structure (Summary) + +``` +provider/ + repository/ + repository.go # GlobalRepository interface + domain types + inmemory.go # InMemoryRepository implementation + inmemory_test.go + # Future: redis.go, redis_test.go + auth/ + service.go # gRPC AuthService.ValidateAuth implementation + service_test.go + usage/ + service.go # gRPC UsageService.Report implementation + mapper.go # Event mapping to internal usage + service_test.go + session/ + service.go # gRPC SessionService implementation + quotas.go # Quota limits from provider-config + service_test.go + +proto/ + sf/sds/auth/v1/ + auth.proto # AuthService definition (or extend existing) + sf/gateway/payment/v1/ + usage.proto # UsageService definition + sf/sds/session/v1/ + session.proto # SessionService definition +``` + +--- + +## Key Decisions Made + +1. **Auth via gRPC**: `sds://` dauth plugin calls `AuthService.ValidateAuth` with RAV from `x-sds-rav` header +2. **RAV-based auth**: EIP-712 signature validation, no JWT +3. **`sds://` = alias for `tgm://`**: Both call external gRPC services, same pattern +4. 
**Metering is batched**: Plugin batches with configurable flush interval (default 100ms) +5. **dsession package**: https://github.com/streamingfast/dsession +6. **organization_id**: Maps to payer address (from RAV) +7. **api_key_id**: Optional/empty for now (may need firehose-core change to make optional) +8. **Quota/limits**: Provider-configurable via provider-config (includes pricing + quotas) +9. **Header name**: `x-sds-rav` - raw bytes for machine-to-machine; base64 if user-provided +10. **AuthService**: Separate gRPC service (not extending ProviderSidecarService) +11. **GlobalRepository**: Interface-based state storage; InMemory first, Redis for HA later +12. **All repo methods**: Take `ctx context.Context`, return `error` +13. **ConcurrentMap**: Type alias using `github.com/alphadose/haxmap` for lock-free concurrent maps + +--- + +## Open Questions (Resolved) + +| Question | Answer | +|----------|--------| +| Auth architecture | **Option A**: `sds://` plugin calls gRPC `ValidateAuth` on provider sidecar | +| RAV flow | Consumer sends `x-sds-rav=RAVx` header to tier1; tier1 calls sidecar to validate | +| JWT key management | **No JWT** - Auth is RAV signature-based | +| Session vs Auth boundary | Auth validates RAV + sets payer context. Session manages lifecycle. | +| Metering granularity | Batched with configurable flush (like tgm-gateway, default 100ms) | +| dsession package location | https://github.com/streamingfast/dsession | +| Scheme naming | `sds://` and `tgm://` are aliases - both call external gRPC services | +| organization_id mapping | Payer address from RAV | +| api_key_id mapping | Optional/empty for now | +| Quota/limits | Provider-config (global config including pricing + quotas) | + +--- + +## All Questions Resolved ✓ + +| Question | Decision | +|----------|----------| +| Header name & encoding | `x-sds-rav` - raw bytes for machine-to-machine (gRPC); base64 if user/operator provides manually | +| Provider config structure | Single config with quotas + pricing | +| AuthService location | Separate `AuthService` (not extending ProviderSidecarService) | + +--- + +## References + +- dsession: https://github.com/streamingfast/dsession +- Reference tgm-gateway: `resources/tgm-gateway/` +- dmetering: `resources/dmetering/` +- SDS provider sidecar: `provider/sidecar/` +- Proto definitions: `proto/graph/substreams/data_service/provider/v1/` + +--- + +## Completed Items + +All items completed in commit `d313c011`: + +- [x] **Priority 0**: GlobalRepository interface + InMemoryRepository with haxmap +- [x] **Priority 1**: Proto definitions for AuthService, UsageService, SessionService +- [x] **Priority 2**: AuthService gRPC implementation with RAV validation & auth cache +- [x] **Priority 3**: UsageService gRPC implementation with usage tracking +- [x] **Priority 4**: SessionService gRPC implementation with quota enforcement +- [x] **Priority 5**: All services wired into provider sidecar + +**Files Created:** +- `provider/repository/repository.go` - GlobalRepository interface + domain types +- `provider/repository/inmemory.go` - InMemory implementation using haxmap +- `provider/repository/inmemory_test.go` - 29 comprehensive tests +- `provider/auth/service.go` - AuthService with EIP-712 RAV validation +- `provider/auth/service_test.go` - Unit tests +- `provider/usage/service.go` - UsageService for metering +- `provider/usage/service_test.go` - Unit tests +- `provider/session/service.go` - SessionService (BorrowWorker/ReturnWorker/KeepAlive) +- `provider/session/quotas.go` - 
QuotaConfig with per-payer overrides
+- `provider/session/service_test.go` - Unit tests
+- `proto/graph/substreams/data_service/sds/auth/v1/auth.proto`
+- `proto/graph/substreams/data_service/sds/usage/v1/usage.proto`
+- `proto/graph/substreams/data_service/sds/session/v1/session.proto`
+- Generated pb + connect code for all services
diff --git a/proto/graph/substreams/data_service/sds/auth/v1/auth.proto b/proto/graph/substreams/data_service/sds/auth/v1/auth.proto
new file mode 100644
index 0000000..461749d
--- /dev/null
+++ b/proto/graph/substreams/data_service/sds/auth/v1/auth.proto
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package graph.substreams.data_service.sds.auth.v1;
+
+import "graph/substreams/data_service/common/v1/types.proto";
+
+// AuthService validates incoming payment RAVs for firehose-core plugins.
+// The sds:// dauth plugin calls ValidateAuth with the RAV extracted from
+// the x-sds-rav gRPC header and receives back auth context (payer address, etc.)
+// to populate trusted headers.
+service AuthService {
+  // ValidateAuth validates a SignedRAV and returns auth context for the caller.
+  // Called by the sds:// dauth plugin when a client connects with an x-sds-rav header.
+  rpc ValidateAuth(ValidateAuthRequest) returns (ValidateAuthResponse);
+}
+
+message ValidateAuthRequest {
+  // SignedRAV extracted from the `x-sds-rav` gRPC header
+  common.v1.SignedRAV payment_rav = 1;
+  // Client IP address (optional, for logging/rate-limiting)
+  string ip_address = 2;
+  // Request path/endpoint (optional)
+  string path = 3;
+}
+
+message ValidateAuthResponse {
+  // Payer Ethereum address (0x...) → maps to x-user-id / x-organization-id
+  string organization_id = 1;
+  // Optional identifier for sub-key or signer; empty for now
+  string api_key_id = 2;
+  // Additional context to pass through as trusted headers
+  map<string, string> metadata = 3;
+}
diff --git a/proto/graph/substreams/data_service/sds/session/v1/session.proto b/proto/graph/substreams/data_service/sds/session/v1/session.proto
new file mode 100644
index 0000000..b14cb5b
--- /dev/null
+++ b/proto/graph/substreams/data_service/sds/session/v1/session.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+
+package graph.substreams.data_service.sds.session.v1;
+
+import "google/protobuf/duration.proto";
+
+// SessionService implements the worker-pool / dsession protocol for firehose-core.
+// The dsession tgm:// plugin calls BorrowWorker before handling a request and
+// ReturnWorker when done. KeepAlive is called periodically to maintain session state.
+service SessionService {
+  // BorrowWorker acquires a worker slot for a new streaming request.
+  // Returns resource_exhausted if quota is exceeded.
+  rpc BorrowWorker(BorrowWorkerRequest) returns (BorrowWorkerResponse);
+
+  // ReturnWorker releases a previously borrowed worker slot.
+  rpc ReturnWorker(ReturnWorkerRequest) returns (ReturnWorkerResponse);
+
+  // KeepAlive refreshes the session's last-seen timestamp.
+  rpc KeepAlive(KeepAliveRequest) returns (KeepAliveResponse);
+}
+
+// BorrowWorkerRequest acquires a worker slot for a request.
+message BorrowWorkerRequest { + // Service identifier, e.g., "substreams" + string service = 1; + // Payer Ethereum address (from auth context) + string organization_id = 2; + // Optional sub-key or signer identifier; empty for now + string api_key_id = 3; + // Trace ID of the incoming request for deduplication + string trace_id = 4; + // Maximum concurrent workers allowed for this trace ID + int64 max_worker_for_trace_id = 5; +} + +message BorrowWorkerResponse { + // Unique key identifying this worker slot; returned in ReturnWorker + string worker_key = 1; + // Outcome of the borrow attempt + BorrowStatus status = 2; + // Capacity information about the worker pool + WorkerState worker_state = 3; +} + +// BorrowStatus indicates the outcome of a BorrowWorker call. +enum BorrowStatus { + BORROW_STATUS_UNSPECIFIED = 0; + // Worker slot acquired successfully + BORROW_STATUS_BORROWED = 1; + // Quota exceeded; caller should return an unavailable error to the client + BORROW_STATUS_RESOURCE_EXHAUSTED = 2; +} + +// WorkerState describes the current capacity of the worker pool. +message WorkerState { + // Maximum number of workers available in the pool + int64 max_workers = 1; + // Number of workers currently in use + int64 active_workers = 2; +} + +// ReturnWorkerRequest releases a previously borrowed worker slot. +message ReturnWorkerRequest { + // Worker key returned from BorrowWorker + string worker_key = 1; + // Minimum duration the worker should have been active before being released. + // Used to enforce minimum billing windows. + google.protobuf.Duration minimal_worker_life_duration = 2; +} + +message ReturnWorkerResponse {} + +// KeepAliveRequest refreshes the session's last-seen timestamp. +message KeepAliveRequest { + // Worker key identifying the session to keep alive + string worker_key = 1; + // Optional sub-key or signer identifier + string api_key_id = 2; +} + +message KeepAliveResponse {} diff --git a/proto/graph/substreams/data_service/sds/usage/v1/usage.proto b/proto/graph/substreams/data_service/sds/usage/v1/usage.proto new file mode 100644 index 0000000..67f5a85 --- /dev/null +++ b/proto/graph/substreams/data_service/sds/usage/v1/usage.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package graph.substreams.data_service.sds.usage.v1; + +import "google/protobuf/timestamp.proto"; + +// UsageService receives batched metering events from the dmetering tgm:// plugin. +// The plugin batches events with a configurable flush interval (default 100ms). +service UsageService { + // Report receives a batch of metering events from the firehose-core dmetering plugin. + rpc Report(ReportRequest) returns (ReportResponse); +} + +message ReportRequest { + // Batched metering events + repeated Event events = 1; +} + +message ReportResponse { + // Whether the session/key has been revoked + bool revoked = 1; + // If revoked, the reason for revocation + string revocation_reason = 2; +} + +// Event represents a single metered usage event as reported by the dmetering plugin. 
+message Event { + // Payer Ethereum address (from auth context) + string organization_id = 1; + // Optional sub-key or signer identifier + string api_key_id = 2; + // Client IP address + string ip_address = 3; + // Endpoint called, e.g., "sf.substreams.rpc.v2/Blocks" + string endpoint = 4; + // Network identifier, e.g., "eth-mainnet" + string network = 5; + // Arbitrary metadata + string meta = 7; + // Provider Ethereum address + string provider = 8; + // Output module hash (for Substreams) + string output_module_hash = 9; + // Individual metrics (blocks_count, bytes_sent, etc.) + repeated Metric metrics = 20; + // Event timestamp + google.protobuf.Timestamp timestamp = 30; +} + +// Metric is a single named counter value within a usage event. +message Metric { + string name = 1; + int64 value = 2; +} diff --git a/provider/auth/log_test.go b/provider/auth/log_test.go new file mode 100644 index 0000000..f8c18b4 --- /dev/null +++ b/provider/auth/log_test.go @@ -0,0 +1,11 @@ +package auth_test + +import ( + "github.com/streamingfast/logging" +) + +var zlogTest, _ = logging.PackageLogger("auth_test", "github.com/graphprotocol/substreams-data-service/provider/auth/tests") + +func init() { + logging.InstantiateLoggers() +} diff --git a/provider/auth/service.go b/provider/auth/service.go new file mode 100644 index 0000000..1285263 --- /dev/null +++ b/provider/auth/service.go @@ -0,0 +1,193 @@ +// Package auth implements the gRPC AuthService that validates SignedRAVs for +// the sds:// dauth plugin used by firehose-core tier1. +package auth + +import ( + "context" + "fmt" + "time" + + "connectrpc.com/connect" + "github.com/alphadose/haxmap" + "github.com/graphprotocol/substreams-data-service/horizon" + authv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1" + "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1/authv1connect" + "github.com/graphprotocol/substreams-data-service/sidecar" + "github.com/streamingfast/eth-go" + "github.com/streamingfast/logging" + "go.uber.org/zap" +) + +var zlog, _ = logging.PackageLogger("sds_auth", "github.com/graphprotocol/substreams-data-service/provider/auth") + +// CollectorAuthorizer checks whether a signer is authorized to act on behalf +// of a payer (e.g., via on-chain delegation). +type CollectorAuthorizer interface { + IsAuthorized(ctx context.Context, payer, signer eth.Address) (bool, error) +} + +// AuthService implements authv1connect.AuthServiceHandler. +// It validates EIP-712 signed RAVs, recovers the signer, and checks +// authorization to return a payer-based auth context. +type AuthService struct { + serviceProvider eth.Address + domain *horizon.Domain + collectorQuerier CollectorAuthorizer + + authCache *haxmap.Map[string, authCacheEntry] +} + +type authCacheEntry struct { + ok bool + expires time.Time +} + +var _ authv1connect.AuthServiceHandler = (*AuthService)(nil) + +// NewAuthService creates a new AuthService with the given configuration. +// collectorQuerier may be nil if on-chain delegation checks are not needed. 
+func NewAuthService( + serviceProvider eth.Address, + domain *horizon.Domain, + collectorQuerier CollectorAuthorizer, +) *AuthService { + return &AuthService{ + serviceProvider: serviceProvider, + domain: domain, + collectorQuerier: collectorQuerier, + authCache: haxmap.New[string, authCacheEntry](), + } +} + +// ValidateAuth validates a SignedRAV received in the x-sds-rav header and +// returns the payer address as organization_id for use in trusted headers. +func (s *AuthService) ValidateAuth( + ctx context.Context, + req *connect.Request[authv1.ValidateAuthRequest], +) (*connect.Response[authv1.ValidateAuthResponse], error) { + zlog.Debug("ValidateAuth called", + zap.String("ip_address", req.Msg.IpAddress), + zap.String("path", req.Msg.Path), + ) + + if req.Msg.PaymentRav == nil { + return nil, connect.NewError(connect.CodeUnauthenticated, fmt.Errorf("missing payment_rav")) + } + + // Convert proto SignedRAV to horizon SignedRAV for EIP-712 operations. + signedRAV, err := sidecar.ProtoSignedRAVToHorizon(req.Msg.PaymentRav) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("invalid payment_rav: %w", err)) + } + + result, err := s.validateRAV(ctx, signedRAV, req.Msg.IpAddress, req.Msg.Path) + if err != nil { + if authErr, ok := err.(*AuthError); ok { + return nil, connect.NewError(authErr.Code, err) + } + return nil, connect.NewError(connect.CodeUnauthenticated, err) + } + + return connect.NewResponse(&authv1.ValidateAuthResponse{ + OrganizationId: result.OrganizationId, + ApiKeyId: result.ApiKeyId, + Metadata: result.Metadata, + }), nil +} + +// AuthResult holds the result of a successful authentication. +type AuthResult struct { + OrganizationId string + ApiKeyId string + Metadata map[string]string +} + +// AuthError represents an authentication error with an associated connect code. +type AuthError struct { + Code connect.Code + Msg string +} + +func (e *AuthError) Error() string { return e.Msg } + +// validateRAV is the internal implementation used by both ValidateAuth and the SF adapter. +// It validates the SignedRAV and returns the authentication result. +// Errors returned are *AuthError with the appropriate connect code set. +func (s *AuthService) validateRAV(ctx context.Context, signedRAV *horizon.SignedRAV, ipAddress, path string) (*AuthResult, error) { + // Recover the signer from the EIP-712 signature. + signerAddr, err := signedRAV.RecoverSigner(s.domain) + if err != nil { + zlog.Warn("RAV signature verification failed", zap.Error(err)) + return nil, &AuthError{Code: connect.CodeUnauthenticated, Msg: fmt.Sprintf("invalid signature: %v", err)} + } + + payer := signedRAV.Message.Payer + + // Verify that the signer is authorized to act for the payer. + authorized, err := s.isSignerAuthorized(ctx, payer, signerAddr) + if err != nil { + zlog.Warn("authorization check failed", + zap.Stringer("payer", payer), + zap.Stringer("signer", signerAddr), + zap.Error(err), + ) + return nil, &AuthError{Code: connect.CodeInternal, Msg: fmt.Sprintf("authorization check failed: %v", err)} + } + if !authorized { + zlog.Warn("signer not authorized for payer", + zap.Stringer("payer", payer), + zap.Stringer("signer", signerAddr), + ) + return nil, &AuthError{Code: connect.CodePermissionDenied, Msg: fmt.Sprintf("signer %s is not authorized for payer %s", signerAddr.Pretty(), payer.Pretty())} + } + + // Verify that the RAV targets this service provider. 
+ if !sidecar.AddressesEqual(signedRAV.Message.ServiceProvider, s.serviceProvider) { + zlog.Warn("RAV targets a different service provider", + zap.Stringer("expected", s.serviceProvider), + zap.Stringer("got", signedRAV.Message.ServiceProvider), + ) + return nil, &AuthError{Code: connect.CodePermissionDenied, Msg: fmt.Sprintf("RAV targets service provider %s, not %s", signedRAV.Message.ServiceProvider.Pretty(), s.serviceProvider.Pretty())} + } + + zlog.Debug("validateRAV succeeded", + zap.Stringer("payer", payer), + zap.Stringer("signer", signerAddr), + ) + + return &AuthResult{ + OrganizationId: payer.Pretty(), + ApiKeyId: "", + Metadata: map[string]string{ + "signer": signerAddr.Pretty(), + }, + }, nil +} + +// isSignerAuthorized checks whether signer may act on behalf of payer. +// Results are cached for 30 seconds to reduce on-chain RPC calls. +func (s *AuthService) isSignerAuthorized(ctx context.Context, payer, signer eth.Address) (bool, error) { + if sidecar.AddressesEqual(payer, signer) { + return true, nil + } + + if s.collectorQuerier == nil { + return false, nil + } + + key := payer.String() + "|" + signer.String() + now := time.Now() + + if entry, ok := s.authCache.Get(key); ok && now.Before(entry.expires) { + return entry.ok, nil + } + + ok, err := s.collectorQuerier.IsAuthorized(ctx, payer, signer) + if err != nil { + return false, err + } + + s.authCache.Set(key, authCacheEntry{ok: ok, expires: now.Add(30 * time.Second)}) + + return ok, nil +} diff --git a/provider/auth/service_test.go b/provider/auth/service_test.go new file mode 100644 index 0000000..957bdeb --- /dev/null +++ b/provider/auth/service_test.go @@ -0,0 +1,185 @@ +package auth_test + +import ( + "context" + "fmt" + "math/big" + "testing" + + "connectrpc.com/connect" + "github.com/graphprotocol/substreams-data-service/horizon" + authv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1" + "github.com/graphprotocol/substreams-data-service/provider/auth" + "github.com/graphprotocol/substreams-data-service/sidecar" + "github.com/streamingfast/eth-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testDomain is a fixed EIP-712 domain used across tests. +var testDomain = horizon.NewDomain(1337, eth.MustNewAddress("0x1234567890123456789012345678901234567890")) + +// testServiceProvider is the address this sidecar "owns". +var testServiceProvider = eth.MustNewAddress("0xaaaabbbbccccddddeeeeffffaaaabbbbccccdddd") + +// newTestKey generates a deterministic test private key from a single byte. +func newTestKey(seed byte) *eth.PrivateKey { + // Build a deterministic 32-byte private key from a single seed byte. + var rawKey [32]byte + rawKey[31] = seed + hexStr := fmt.Sprintf("%064x", rawKey) + key, err := eth.NewPrivateKey(hexStr) + if err != nil { + panic(err) + } + return key +} + +// buildSignedRAV creates a proto SignedRAV signed by key with payer, serviceProvider = testServiceProvider. 
+func buildSignedRAV(t *testing.T, payerKey *eth.PrivateKey, signerKey *eth.PrivateKey, serviceProvider eth.Address) *authv1.ValidateAuthRequest { + t.Helper() + + payerAddr := payerKey.PublicKey().Address() + var collectionID horizon.CollectionID + collectionID[0] = 0xCA + + rav := &horizon.RAV{ + CollectionID: collectionID, + Payer: payerAddr, + DataService: eth.MustNewAddress("0x1111111111111111111111111111111111111111"), + ServiceProvider: serviceProvider, + TimestampNs: 1_000_000, + ValueAggregate: big.NewInt(100), + Metadata: nil, + } + + signedRAV, err := horizon.Sign(testDomain, rav, signerKey) + require.NoError(t, err) + + protoRAV := sidecar.HorizonSignedRAVToProto(signedRAV) + + return &authv1.ValidateAuthRequest{ + PaymentRav: protoRAV, + IpAddress: "127.0.0.1", + Path: "/sf.substreams.rpc.v2/Blocks", + } +} + +// --- Tests --- + +func TestAuthService_ValidateAuth_SelfSigned(t *testing.T) { + // When payer == signer no on-chain check is needed. + payerKey := newTestKey(0x01) + payerAddr := payerKey.PublicKey().Address() + + svc := auth.NewAuthService(testServiceProvider, testDomain, nil) + + req := buildSignedRAV(t, payerKey, payerKey, testServiceProvider) + resp, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req)) + + require.NoError(t, err) + assert.Equal(t, payerAddr.Pretty(), resp.Msg.OrganizationId) +} + +func TestAuthService_ValidateAuth_MissingRAV(t *testing.T) { + svc := auth.NewAuthService(testServiceProvider, testDomain, nil) + + _, err := svc.ValidateAuth(context.Background(), connect.NewRequest(&authv1.ValidateAuthRequest{ + PaymentRav: nil, + })) + + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeUnauthenticated, connectErr.Code()) +} + +func TestAuthService_ValidateAuth_WrongServiceProvider(t *testing.T) { + payerKey := newTestKey(0x02) + differentProvider := eth.MustNewAddress("0x9999999999999999999999999999999999999999") + + svc := auth.NewAuthService(testServiceProvider, testDomain, nil) + + // Build RAV targeting a *different* service provider. + req := buildSignedRAV(t, payerKey, payerKey, differentProvider) + _, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req)) + + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodePermissionDenied, connectErr.Code()) +} + +func TestAuthService_ValidateAuth_UnauthorizedSigner(t *testing.T) { + payerKey := newTestKey(0x03) + signerKey := newTestKey(0x04) // different from payer, not authorized + + // collectorQuerier that always returns false (unauthorized). + svc := auth.NewAuthService(testServiceProvider, testDomain, &mockAuthorizer{authorized: false}) + + req := buildSignedRAV(t, payerKey, signerKey, testServiceProvider) + _, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req)) + + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodePermissionDenied, connectErr.Code()) +} + +func TestAuthService_ValidateAuth_AuthorizedDelegateSigner(t *testing.T) { + payerKey := newTestKey(0x05) + signerKey := newTestKey(0x06) // different from payer but authorized on-chain + + payerAddr := payerKey.PublicKey().Address() + + // collectorQuerier that always returns true (authorized). 
+	svc := auth.NewAuthService(testServiceProvider, testDomain, &mockAuthorizer{authorized: true})
+
+	req := buildSignedRAV(t, payerKey, signerKey, testServiceProvider)
+	resp, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req))
+
+	require.NoError(t, err)
+	assert.Equal(t, payerAddr.Pretty(), resp.Msg.OrganizationId)
+	assert.NotEmpty(t, resp.Msg.Metadata["signer"])
+}
+
+func TestAuthService_ValidateAuth_NilCollectorQuerier_UnauthorizedSigner(t *testing.T) {
+	// When collectorQuerier is nil, only self-signed RAVs are authorized.
+	payerKey := newTestKey(0x07)
+	signerKey := newTestKey(0x08)
+
+	svc := auth.NewAuthService(testServiceProvider, testDomain, nil)
+
+	req := buildSignedRAV(t, payerKey, signerKey, testServiceProvider)
+	_, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req))
+
+	require.Error(t, err)
+	var connectErr *connect.Error
+	require.ErrorAs(t, err, &connectErr)
+	assert.Equal(t, connect.CodePermissionDenied, connectErr.Code())
+}
+
+func TestAuthService_ValidateAuth_AuthorizerError(t *testing.T) {
+	payerKey := newTestKey(0x09)
+	signerKey := newTestKey(0x0a)
+
+	svc := auth.NewAuthService(testServiceProvider, testDomain, &mockAuthorizer{err: assert.AnError})
+
+	req := buildSignedRAV(t, payerKey, signerKey, testServiceProvider)
+	_, err := svc.ValidateAuth(context.Background(), connect.NewRequest(req))
+
+	require.Error(t, err)
+	var connectErr *connect.Error
+	require.ErrorAs(t, err, &connectErr)
+	assert.Equal(t, connect.CodeInternal, connectErr.Code())
+}
+
+// mockAuthorizer implements auth.CollectorAuthorizer for testing.
+type mockAuthorizer struct {
+	authorized bool
+	err        error
+}
+
+func (m *mockAuthorizer) IsAuthorized(_ context.Context, _, _ eth.Address) (bool, error) {
+	return m.authorized, m.err
+}
diff --git a/provider/plugin/auth.go b/provider/plugin/auth.go
new file mode 100644
index 0000000..7d5ca01
--- /dev/null
+++ b/provider/plugin/auth.go
@@ -0,0 +1,173 @@
+package plugin
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"os"
+	"strings"
+
+	"connectrpc.com/connect"
+	commonv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/common/v1"
+	authv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1"
+	"github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1/authv1connect"
+	"github.com/streamingfast/dauth"
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/proto"
+)
+
+// RegisterAuth registers the "sds" scheme with dauth.
+// The config URL format is:
+//
+//	sds://host:port?plaintext=true&insecure=true&dev-api-key=<key>
+//
+// The plugin connects to the provider sidecar's AuthService for RAV validation.
+// All business logic (service provider, escrow, etc.) is on the server side.
+func RegisterAuth() {
+	dauth.Register("sds", func(config string, logger *zap.Logger) (dauth.Authenticator, error) {
+		configExpanded := os.ExpandEnv(config)
+
+		baseCfg, vals, err := parseBaseConfig(configExpanded)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse auth config %q: %w", config, err)
+		}
+
+		// Validate known parameters
+		for k := range vals {
+			switch k {
+			case "insecure", "plaintext", "dev-api-key":
+				// Known parameters
+			default:
+				return nil, fmt.Errorf("unknown query parameter: %s", k)
+			}
+		}
+
+		devAPIKey := vals.Get("dev-api-key")
+
+		return newAuthenticator(baseCfg, devAPIKey, logger)
+	})
+}
+
+// authenticator implements dauth.Authenticator by calling the provider sidecar.
+type authenticator struct { + client authv1connect.AuthServiceClient + devAPIKey string + logger *zap.Logger +} + +func newAuthenticator(cfg *baseConfig, devAPIKey string, logger *zap.Logger) (dauth.Authenticator, error) { + httpClient := newHTTPClient(cfg) + + client := authv1connect.NewAuthServiceClient( + httpClient, + cfg.baseURL(), + ) + + return &authenticator{ + client: client, + devAPIKey: devAPIKey, + logger: logger.Named("sds-auth"), + }, nil +} + +// Authenticate implements dauth.Authenticator. +func (a *authenticator) Authenticate(ctx context.Context, path string, headers map[string][]string, ipAddress string) (context.Context, error) { + a.logger.Debug("Authenticate called", + zap.String("path", path), + zap.Int("header_count", len(headers)), + zap.String("ip", ipAddress), + ) + + // Convert headers to lowercase for case-insensitive lookup + lowerHeaders := make(map[string][]string) + for k, v := range headers { + lowerHeaders[strings.ToLower(k)] = v + } + + // Check for dev mode API key first (client-side bypass for local testing) + if a.devAPIKey != "" { + if apiKeys := lowerHeaders["x-api-key"]; len(apiKeys) > 0 && apiKeys[0] == a.devAPIKey { + a.logger.Debug("dev mode auth bypass", zap.String("api_key", apiKeys[0])) + return dauth.WithTrustedHeaders(ctx, dauth.TrustedHeaders{ + dauth.HeaderOrganizationID: "dev-test-org", + dauth.HeaderApiKeyID: "dev-api-key", + dauth.HeaderIP: ipAddress, + }), nil + } + } + + // Look for the x-sds-rav header containing the SignedRAV + ravHeaders, ok := lowerHeaders["x-sds-rav"] + if !ok || len(ravHeaders) == 0 { + a.logger.Warn("missing x-sds-rav header") + return ctx, connect.NewError(connect.CodeUnauthenticated, fmt.Errorf("missing x-sds-rav header (or valid x-api-key)")) + } + + // Decode the SignedRAV from base64-encoded protobuf + signedRAV, err := decodeRAVFromHeader(ravHeaders[0]) + if err != nil { + a.logger.Warn("failed to decode RAV from header", zap.Error(err)) + return ctx, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("invalid x-sds-rav header: %w", err)) + } + + // Call the provider sidecar's AuthService to validate the RAV + req := connect.NewRequest(&authv1.ValidateAuthRequest{ + PaymentRav: signedRAV, + IpAddress: ipAddress, + Path: path, + }) + + resp, err := a.client.ValidateAuth(ctx, req) + if err != nil { + a.logger.Warn("RAV validation failed", zap.Error(err)) + // Pass through the error from the server (it already has proper connect codes) + return ctx, err + } + + // Build trusted headers from the response + trustedHeaders := dauth.TrustedHeaders{ + dauth.HeaderOrganizationID: resp.Msg.OrganizationId, + dauth.HeaderIP: ipAddress, + } + + if resp.Msg.ApiKeyId != "" { + trustedHeaders[dauth.HeaderApiKeyID] = resp.Msg.ApiKeyId + } + + for k, v := range resp.Msg.Metadata { + trustedHeaders[k] = v + } + + a.logger.Debug("authentication successful", + zap.String("organization_id", resp.Msg.OrganizationId), + zap.Int("header_count", len(trustedHeaders)), + ) + + return dauth.WithTrustedHeaders(ctx, trustedHeaders), nil +} + +// Ready implements dauth.Authenticator. +func (a *authenticator) Ready(ctx context.Context) bool { + return true +} + +// decodeRAVFromHeader decodes a SignedRAV from its base64-encoded protobuf format. 
+func decodeRAVFromHeader(headerValue string) (*commonv1.SignedRAV, error) {
+	// Try base64 decoding first (standard format)
+	data, err := base64.StdEncoding.DecodeString(headerValue)
+	if err != nil {
+		// Try raw base64 URL encoding
+		data, err = base64.RawURLEncoding.DecodeString(headerValue)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Parse as protobuf SignedRAV
+	var protoRAV commonv1.SignedRAV
+	if err := proto.Unmarshal(data, &protoRAV); err != nil {
+		return nil, err
+	}
+
+	return &protoRAV, nil
+}
diff --git a/provider/plugin/metering.go b/provider/plugin/metering.go
new file mode 100644
index 0000000..26c31fb
--- /dev/null
+++ b/provider/plugin/metering.go
@@ -0,0 +1,202 @@
+package plugin
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"connectrpc.com/connect"
+	usagev1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1"
+	"github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect"
+	"github.com/streamingfast/dmetering"
+	"github.com/streamingfast/shutter"
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// RegisterMetering registers the "sds" scheme with dmetering.
+// The config URL format is:
+//
+//	sds://host:port?plaintext=true&insecure=true&network=<network>&buffer=<size>&delay=<delay-ms>
+//
+// The plugin connects to the provider sidecar's UsageService for metering.
+func RegisterMetering() {
+	dmetering.Register("sds", func(config string, logger *zap.Logger) (dmetering.EventEmitter, error) {
+		configExpanded := os.ExpandEnv(config)
+
+		baseCfg, vals, err := parseBaseConfig(configExpanded)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse metering config %q: %w", config, err)
+		}
+
+		// Validate known parameters
+		for k := range vals {
+			switch k {
+			case "insecure", "plaintext", "network", "buffer", "delay", "panic-on-drop", "panicOnDrop":
+				// Known parameters
+			default:
+				return nil, fmt.Errorf("unknown query parameter: %s", k)
+			}
+		}
+
+		network := vals.Get("network")
+		if network == "" {
+			return nil, fmt.Errorf("network is required (as query param)")
+		}
+
+		bufferSize, err := parseUint64(vals.Get("buffer"), 10000)
+		if err != nil {
+			return nil, fmt.Errorf("invalid buffer: %w", err)
+		}
+
+		delay, err := parseInt64(vals.Get("delay"), 100)
+		if err != nil {
+			return nil, fmt.Errorf("invalid delay: %w", err)
+		}
+
+		panicOnDrop := vals.Get("panicOnDrop") == "true" || vals.Get("panic-on-drop") == "true"
+
+		return newMeteringEmitter(baseCfg, network, bufferSize, time.Duration(delay)*time.Millisecond, panicOnDrop, logger)
+	})
+}
+
+// meteringEmitter implements dmetering.EventEmitter by calling the provider sidecar.
+type meteringEmitter struct {
+	*shutter.Shutter
+	client      usagev1connect.UsageServiceClient
+	network     string
+	buffer      chan dmetering.Event
+	activeBatch []*usagev1.Event
+	done        chan bool
+	panicOnDrop bool
+	delay       time.Duration
+	logger      *zap.Logger
+}
+
+func newMeteringEmitter(cfg *baseConfig, network string, bufferSize uint64, delay time.Duration, panicOnDrop bool, logger *zap.Logger) (dmetering.EventEmitter, error) {
+	httpClient := newHTTPClient(cfg)
+
+	client := usagev1connect.NewUsageServiceClient(
+		httpClient,
+		cfg.baseURL(),
+	)
+
+	e := &meteringEmitter{
+		Shutter:     shutter.New(),
+		client:      client,
+		network:     network,
+		buffer:      make(chan dmetering.Event, bufferSize),
+		done:        make(chan bool, 1),
+		panicOnDrop: panicOnDrop,
+		delay:       delay,
+		logger:      logger.Named("sds-metering"),
+	}
+
+	e.OnTerminating(func(err error) {
+		e.logger.Info("received shutdown signal, waiting for launch loop to end", zap.Error(err))
+		<-e.done
+		e.flushAndClose()
+	})
+
+	go e.launch()
+
+	return e, nil
+}
+
+func (e *meteringEmitter) launch() {
+	ticker := time.NewTicker(e.delay)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-e.Terminating():
+			e.done <- true
+			return
+		case <-ticker.C:
+			e.emit(e.activeBatch)
+			e.activeBatch = nil
+		case ev := <-e.buffer:
+			ev.Network = e.network
+			e.activeBatch = append(e.activeBatch, e.eventToProto(ev))
+		}
+	}
+}
+
+func (e *meteringEmitter) flushAndClose() {
+	close(e.buffer)
+
+	t0 := time.Now()
+	e.logger.Info("waiting for event flush to complete", zap.Int("count", len(e.buffer)))
+	defer func() {
+		e.logger.Info("event flushed", zap.Duration("elapsed", time.Since(t0)))
+	}()
+
+	for {
+		ev, ok := <-e.buffer
+		if !ok {
+			e.logger.Info("sending last events", zap.Int("count", len(e.activeBatch)))
+			e.emit(e.activeBatch)
+			return
+		}
+		ev.Network = e.network
+		e.activeBatch = append(e.activeBatch, e.eventToProto(ev))
+	}
+}
+
+// Emit implements dmetering.EventEmitter.
+func (e *meteringEmitter) Emit(_ context.Context, ev dmetering.Event) {
+	if ev.Endpoint == "" {
+		e.logger.Warn("events must contain endpoint, dropping event", zap.Object("event", ev))
+		return
+	}
+
+	if e.IsTerminating() {
+		e.logger.Warn("emitter is shutting down, cannot track event", zap.Object("event", ev))
+		return
+	}
+
+	select {
+	case e.buffer <- ev:
+	default:
+		if e.panicOnDrop {
+			panic(fmt.Errorf("failed to queue metric, channel is full"))
+		}
+		e.logger.Warn("dropping event, buffer full")
+	}
+}
+
+func (e *meteringEmitter) emit(events []*usagev1.Event) {
+	if len(events) == 0 {
+		return
+	}
+	e.logger.Debug("tracking events", zap.Int("count", len(events)))
+
+	req := connect.NewRequest(&usagev1.ReportRequest{
+		Events: events,
+	})
+
+	_, err := e.client.Report(context.Background(), req)
+	if err != nil {
+		e.logger.Warn("failed to emit events", zap.Error(err))
+	}
+}
+
+// eventToProto converts a dmetering.Event to our usagev1.Event.
+func (e *meteringEmitter) eventToProto(ev dmetering.Event) *usagev1.Event { + protoEvent := &usagev1.Event{ + OrganizationId: ev.OrganizationID, + ApiKeyId: ev.ApiKeyID, + Endpoint: ev.Endpoint, + Network: ev.Network, + Timestamp: timestamppb.New(ev.Timestamp), + Metrics: make([]*usagev1.Metric, 0, len(ev.Metrics)), + } + + for name, value := range ev.Metrics { + protoEvent.Metrics = append(protoEvent.Metrics, &usagev1.Metric{ + Name: name, + Value: int64(value), + }) + } + + return protoEvent +} diff --git a/provider/plugin/plugin.go b/provider/plugin/plugin.go new file mode 100644 index 0000000..53b783b --- /dev/null +++ b/provider/plugin/plugin.go @@ -0,0 +1,137 @@ +// Package plugin provides SDS plugin registration for firehose-core. +// It registers "sds" scheme handlers with dauth, dsession, and dmetering +// that connect to an SDS provider sidecar via gRPC/Connect. +// +// The plugins are gRPC clients - all business logic (service provider address, +// escrow address, etc.) is configured on the provider sidecar server side. +// Plugin configuration only needs connection parameters. +// +// Usage in firehose-core: +// +// common-auth-plugin: "sds://localhost:9001?plaintext=true" +// common-session-plugin: "sds://localhost:9001?plaintext=true" +// common-metering-plugin: "sds://localhost:9001?plaintext=true&network=my-network" +package plugin + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/streamingfast/logging" + "golang.org/x/net/http2" +) + +var zlog, _ = logging.PackageLogger("sds_plugin", "github.com/graphprotocol/substreams-data-service/provider/plugin") + +// baseConfig holds common connection configuration for all plugins. +type baseConfig struct { + Endpoint string // host:port + Insecure bool // skip TLS certificate verification + Plaintext bool // use plaintext (no TLS) +} + +// parseBaseConfig parses common connection parameters from a URL. +func parseBaseConfig(configURL string) (*baseConfig, url.Values, error) { + u, err := url.Parse(configURL) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse URL: %w", err) + } + + if u.Scheme != "sds" { + return nil, nil, fmt.Errorf("invalid scheme %q, expected 'sds'", u.Scheme) + } + + hostname := u.Hostname() + if hostname == "" { + return nil, nil, fmt.Errorf("hostname is required, e.g. sds://localhost:9001") + } + + port := u.Port() + if port == "" { + port = "443" + } + + cfg := &baseConfig{ + Endpoint: fmt.Sprintf("%s:%s", hostname, port), + } + + vals := u.Query() + + if vals.Get("insecure") == "true" { + cfg.Insecure = true + } + + if vals.Get("plaintext") == "true" { + cfg.Plaintext = true + } + + return cfg, vals, nil +} + +// newHTTPClient creates an HTTP client configured for the given base config. +// For plaintext connections, it uses HTTP/2 cleartext (h2c). +// For TLS connections, it uses standard HTTPS. 
+func newHTTPClient(cfg *baseConfig) *http.Client { + if cfg.Plaintext { + // Use HTTP/2 cleartext (h2c) for plaintext connections + return &http.Client{ + Transport: &http2.Transport{ + AllowHTTP: true, + DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, + } + } + + // Use standard HTTPS with optional certificate verification skip + tlsConfig := &tls.Config{} + if cfg.Insecure { + tlsConfig.InsecureSkipVerify = true + } + + return &http.Client{ + Transport: &http2.Transport{ + TLSClientConfig: tlsConfig, + }, + } +} + +// baseURL returns the base URL for the given config. +func (cfg *baseConfig) baseURL() string { + if cfg.Plaintext { + return "http://" + cfg.Endpoint + } + return "https://" + cfg.Endpoint +} + +// parseDuration parses a duration string, returning the default if empty. +func parseDuration(s string, defaultVal time.Duration) (time.Duration, error) { + if s == "" { + return defaultVal, nil + } + return time.ParseDuration(s) +} + +// parseInt64 parses an int64 string, returning the default if empty. +func parseInt64(s string, defaultVal int64) (int64, error) { + if s == "" { + return defaultVal, nil + } + return strconv.ParseInt(s, 10, 64) +} + +// parseUint64 parses a uint64 string, returning the default if empty. +func parseUint64(s string, defaultVal uint64) (uint64, error) { + if s == "" { + return defaultVal, nil + } + return strconv.ParseUint(s, 10, 64) +} diff --git a/provider/plugin/register.go b/provider/plugin/register.go new file mode 100644 index 0000000..1fe4902 --- /dev/null +++ b/provider/plugin/register.go @@ -0,0 +1,18 @@ +package plugin + +// Register registers all SDS plugins with their respective packages. +// This should be called during init() in firehose-core. +// +// Usage in firehose-core config after registration: +// +// common-auth-plugin: "sds://localhost:9001?plaintext=true" +// common-session-plugin: "sds://localhost:9001?plaintext=true" +// common-metering-plugin: "sds://localhost:9001?plaintext=true&network=my-network" +// +// All three plugins connect to the same provider sidecar endpoint. +// The sidecar handles all business logic (service provider, escrow, quotas, etc.). +func Register() { + RegisterAuth() + RegisterSession() + RegisterMetering() +} diff --git a/provider/plugin/session.go b/provider/plugin/session.go new file mode 100644 index 0000000..d0d046d --- /dev/null +++ b/provider/plugin/session.go @@ -0,0 +1,347 @@ +package plugin + +import ( + "context" + "fmt" + "os" + "time" + + "connectrpc.com/connect" + "github.com/alphadose/haxmap" + sessionv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1" + "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect" + "github.com/streamingfast/dsession" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/durationpb" +) + +// RegisterSession registers the "sds" scheme with dsession. +// The config URL format is: +// +// sds://host:port?plaintext=true&insecure=true&keep-alive-delay=20s&minimal-worker-life-duration=5s +// +// The plugin connects to the provider sidecar's SessionService for worker pool management. +// All quota configuration is on the server side. 
+func RegisterSession() { + dsession.Register("sds", func(config string, logger *zap.Logger) (dsession.SessionPool, error) { + configExpanded := os.ExpandEnv(config) + + baseCfg, vals, err := parseBaseConfig(configExpanded) + if err != nil { + return nil, fmt.Errorf("failed to parse session config %q: %w", config, err) + } + + // Validate known parameters + for k := range vals { + switch k { + case "insecure", "plaintext", "keep-alive-delay", "minimal-worker-life-duration": + // Known parameters + default: + return nil, fmt.Errorf("unknown query parameter: %s", k) + } + } + + keepAliveDelay, err := parseDuration(vals.Get("keep-alive-delay"), 20*time.Second) + if err != nil { + return nil, fmt.Errorf("invalid keep-alive-delay: %w", err) + } + + minimalWorkerLifeDuration, err := parseDuration(vals.Get("minimal-worker-life-duration"), 5*time.Second) + if err != nil { + return nil, fmt.Errorf("invalid minimal-worker-life-duration: %w", err) + } + + return newSessionPool(baseCfg, keepAliveDelay, minimalWorkerLifeDuration, logger) + }) +} + +// sessionInfo tracks a borrowed session and its workers. +type sessionInfo struct { + organizationID string + apiKeyID string + traceID string + workers *haxmap.Map[string, struct{}] + closer chan struct{} +} + +// sessionPool implements dsession.SessionPool by calling the provider sidecar. +type sessionPool struct { + client sessionv1connect.SessionServiceClient + logger *zap.Logger + keepAliveDelay time.Duration + minimalWorkerLifeDuration time.Duration + + sessions *haxmap.Map[string, *sessionInfo] +} + +func newSessionPool(cfg *baseConfig, keepAliveDelay, minimalWorkerLifeDuration time.Duration, logger *zap.Logger) (dsession.SessionPool, error) { + httpClient := newHTTPClient(cfg) + + client := sessionv1connect.NewSessionServiceClient( + httpClient, + cfg.baseURL(), + ) + + return &sessionPool{ + client: client, + logger: logger.Named("sds-session"), + keepAliveDelay: keepAliveDelay, + minimalWorkerLifeDuration: minimalWorkerLifeDuration, + sessions: haxmap.New[string, *sessionInfo](), + }, nil +} + +// Get implements dsession.SessionPool. 
+func (p *sessionPool) Get(ctx context.Context, serviceName string, organizationID string, apiKeyID string, traceID string, onError func(error)) (string, error) { + req := connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: serviceName, + OrganizationId: organizationID, + ApiKeyId: apiKeyID, + TraceId: traceID, + }) + + resp, err := p.client.BorrowWorker(ctx, req) + if err != nil { + // Map connect errors to dsession errors + switch connect.CodeOf(err) { + case connect.CodeUnavailable: + return "", fmt.Errorf("%w: %s", dsession.ErrUnavailable, err.Error()) + case connect.CodePermissionDenied: + return "", fmt.Errorf("%w: %s", dsession.ErrPermissionDenied, err.Error()) + case connect.CodeResourceExhausted: + return "", fmt.Errorf("%w: %s", dsession.ErrQuotaExceeded, err.Error()) + } + return "", fmt.Errorf("failed to borrow session: %w", err) + } + + workerKey := resp.Msg.WorkerKey + workerStatus := resp.Msg.Status + + details := "" + if maxWorkers := resp.Msg.WorkerState.GetMaxWorkers(); maxWorkers != 0 { + details = fmt.Sprintf(" (active sessions: %d/%d)", resp.Msg.WorkerState.GetActiveWorkers(), maxWorkers) + } + + if workerStatus == sessionv1.BorrowStatus_BORROW_STATUS_RESOURCE_EXHAUSTED { + p.logger.Debug("session pool is exhausted", zap.String("status", workerStatus.String()), zap.String("details", details)) + return "", fmt.Errorf("%w%s", dsession.ErrConcurrentStreamLimitExceeded, details) + } + + // Start keep-alive for borrowed sessions + if workerStatus == sessionv1.BorrowStatus_BORROW_STATUS_BORROWED { + done := make(chan struct{}) + p.sessions.Set(workerKey, &sessionInfo{ + organizationID: organizationID, + apiKeyID: apiKeyID, + traceID: traceID, + workers: haxmap.New[string, struct{}](), + closer: done, + }) + + p.startKeepAlive(ctx, done, workerKey, onError) + } + + p.logger.Debug("borrowed request worker", zap.String("worker_key", workerKey)) + + return workerKey, nil +} + +// Release implements dsession.SessionPool. +func (p *sessionPool) Release(sessionKey string) { + go func() { + info, ok := p.sessions.Get(sessionKey) + if !ok { + return + } + + // Collect workers to release + var workersToRelease []string + info.workers.ForEach(func(workerKey string, _ struct{}) bool { + workersToRelease = append(workersToRelease, workerKey) + return true + }) + done := info.closer + p.sessions.Del(sessionKey) + + // Close the done channel + if done != nil { + close(done) + } + + // Release all workers + for _, workerKey := range workersToRelease { + p.releaseWorkerInternal(workerKey) + } + + // Return the session worker + req := connect.NewRequest(&sessionv1.ReturnWorkerRequest{ + WorkerKey: sessionKey, + MinimalWorkerLifeDuration: durationpb.New(p.minimalWorkerLifeDuration), + }) + resp, err := p.client.ReturnWorker(context.Background(), req) + p.logger.Debug("returned request worker", zap.String("key", sessionKey), zap.Any("status", resp), zap.Error(err)) + }() +} + +// GetWorker implements dsession.SessionPool. 
+func (p *sessionPool) GetWorker(ctx context.Context, serviceName string, sessionKey string, maxWorkersPerSession int) (string, error) { + // Look up session info + info, ok := p.sessions.Get(sessionKey) + if !ok { + return "", fmt.Errorf("%w: session key %s not found", dsession.ErrSessionNotFound, sessionKey) + } + organizationID := info.organizationID + apiKeyID := info.apiKeyID + traceID := info.traceID + + req := connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: serviceName, + OrganizationId: organizationID, + ApiKeyId: apiKeyID, + TraceId: traceID, + MaxWorkerForTraceId: int64(maxWorkersPerSession), + }) + + resp, err := p.client.BorrowWorker(ctx, req) + if err != nil { + // Map connect errors to dsession errors + switch connect.CodeOf(err) { + case connect.CodeNotFound: + return "", fmt.Errorf("%w: session not found", dsession.ErrSessionNotFound) + case connect.CodeResourceExhausted: + return "", fmt.Errorf("%w: maximum workers per session exceeded", dsession.ErrWorkersLimitExceeded) + } + return "", fmt.Errorf("failed to borrow worker: %w", err) + } + + workerKey := resp.Msg.WorkerKey + workerStatus := resp.Msg.Status + + details := "" + if maxWorkers := resp.Msg.WorkerState.GetMaxWorkers(); maxWorkers != 0 { + details = fmt.Sprintf(" (active workers: %d/%d)", resp.Msg.WorkerState.GetActiveWorkers(), maxWorkers) + } + + if workerStatus == sessionv1.BorrowStatus_BORROW_STATUS_RESOURCE_EXHAUSTED { + p.logger.Debug("worker limit exceeded", zap.String("worker_key", workerKey), zap.String("status", workerStatus.String())) + return "", fmt.Errorf("%w%s", dsession.ErrWorkersLimitExceeded, details) + } + + // Track this worker under the session + info, ok = p.sessions.Get(sessionKey) + if !ok { + // Session was released, immediately release the newly acquired worker + go p.releaseWorkerInternal(workerKey) + return "", fmt.Errorf("%w: session key %s was released", dsession.ErrSessionNotFound, sessionKey) + } + info.workers.Set(workerKey, struct{}{}) + + p.logger.Info("borrowed worker", + zap.String("organization_id", organizationID), + zap.String("api_key_id", apiKeyID), + zap.String("service_name", serviceName), + zap.String("trace_id", traceID), + zap.String("worker_key", workerKey), + zap.String("session_key", sessionKey), + zap.Int("max_workers", maxWorkersPerSession), + ) + + return workerKey, nil +} + +// ReleaseWorker implements dsession.SessionPool. +func (p *sessionPool) ReleaseWorker(workerKey string) { + // Remove worker from session tracking + p.sessions.ForEach(func(_ string, info *sessionInfo) bool { + info.workers.Del(workerKey) + return true + }) + + // Release worker in a goroutine + go p.releaseWorkerInternal(workerKey) +} + +func (p *sessionPool) releaseWorkerInternal(workerKey string) { + req := connect.NewRequest(&sessionv1.ReturnWorkerRequest{ + WorkerKey: workerKey, + }) + resp, err := p.client.ReturnWorker(context.Background(), req) + p.logger.Debug("returned worker", zap.String("key", workerKey), zap.Any("status", resp), zap.Error(err)) +} + +// startKeepAlive starts the keep-alive goroutine for a borrowed session. 
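+// The loop pings the session key plus every tracked worker key once per
+// keepAliveDelay; after any failure it tightens the interval to one second
+// until a fully successful round, then restores the configured delay.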
+func (p *sessionPool) startKeepAlive(ctx context.Context, done <-chan struct{}, sessionKey string, onError func(error)) {
+	go func() {
+		ticker := time.NewTicker(p.keepAliveDelay)
+		defer ticker.Stop()
+
+		errorMode := false
+
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-done:
+				return
+			case <-ticker.C:
+				info, ok := p.sessions.Get(sessionKey)
+				if !ok {
+					return
+				}
+				apiKeyID := info.apiKeyID
+				var workerKeys []string
+				info.workers.ForEach(func(workerKey string, _ struct{}) bool {
+					workerKeys = append(workerKeys, workerKey)
+					return true
+				})
+
+				hadError := false
+
+				// Keep session alive
+				req := connect.NewRequest(&sessionv1.KeepAliveRequest{
+					WorkerKey: sessionKey,
+					ApiKeyId:  apiKeyID,
+				})
+				_, err := p.client.KeepAlive(ctx, req)
+				if err != nil {
+					hadError = true
+					p.logger.Error("failed to call keep session alive", zap.String("session_key", sessionKey), zap.Error(err))
+					if onError != nil {
+						switch connect.CodeOf(err) {
+						case connect.CodePermissionDenied:
+							onError(fmt.Errorf("%w: %s", dsession.ErrPermissionDenied, err.Error()))
+							return
+						case connect.CodeResourceExhausted:
+							onError(fmt.Errorf("%w: %s", dsession.ErrQuotaExceeded, err.Error()))
+							return
+						}
+					}
+				}
+
+				// Keep workers alive
+				for _, workerKey := range workerKeys {
+					req := connect.NewRequest(&sessionv1.KeepAliveRequest{
+						WorkerKey: workerKey,
+						ApiKeyId:  apiKeyID,
+					})
+					_, err := p.client.KeepAlive(ctx, req)
+					if err != nil {
+						hadError = true
+						p.logger.Error("failed to call keep worker alive", zap.String("worker_key", workerKey), zap.Error(err))
+					}
+				}
+
+				// On error, switch to 1-second retry interval
+				if hadError && !errorMode {
+					ticker.Reset(time.Second)
+					errorMode = true
+					p.logger.Info("switched to error recovery mode with 1 second interval", zap.String("session_key", sessionKey))
+				} else if !hadError && errorMode {
+					ticker.Reset(p.keepAliveDelay)
+					errorMode = false
+					p.logger.Info("recovered from error, switched back to normal interval", zap.String("session_key", sessionKey))
+				}
+			}
+		}
+	}()
+}
diff --git a/provider/repository/inmemory.go b/provider/repository/inmemory.go
new file mode 100644
index 0000000..7e06b7b
--- /dev/null
+++ b/provider/repository/inmemory.go
@@ -0,0 +1,264 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/alphadose/haxmap"
+)
+
+// newStringMap creates a new haxmap keyed by string.
+func newStringMap[V any]() *haxmap.Map[string, V] {
+	return haxmap.New[string, V]()
+}
+
+// InMemoryRepository is an in-memory implementation of GlobalRepository.
+// It is safe for concurrent use.
+type InMemoryRepository struct {
+	sessions *haxmap.Map[string, *Session]
+	workers  *haxmap.Map[string, *Worker]
+	quotas   *haxmap.Map[string, *QuotaUsage]
+
+	// mu serializes read-modify-write updates on quota entries and usage
+	// slices; haxmap alone cannot make these composite updates atomic.
+	mu    sync.Mutex
+	usage *haxmap.Map[string, []*UsageEvent]
+}
+
+var _ GlobalRepository = (*InMemoryRepository)(nil)
+
+// NewInMemoryRepository creates and returns a new InMemoryRepository.
+func NewInMemoryRepository() *InMemoryRepository {
+	return &InMemoryRepository{
+		sessions: newStringMap[*Session](),
+		workers:  newStringMap[*Worker](),
+		quotas:   newStringMap[*QuotaUsage](),
+		usage:    newStringMap[[]*UsageEvent](),
+	}
+}
+
+// --- Session management ---
+
+// SessionCreate stores a new session. Returns an error if the session ID already exists.
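+//
+// A minimal sketch (IDs are illustrative; the session service derives real
+// IDs as "payer|traceID"):
+//
+//	repo := NewInMemoryRepository()
+//	err := repo.SessionCreate(ctx, &Session{
+//		ID:           "0xpayer1|trace-001",
+//		PayerAddress: "0xpayer1",
+//		Status:       SessionStatusActive,
+//		CreatedAt:    time.Now(),
+//	})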
+func (r *InMemoryRepository) SessionCreate(_ context.Context, session *Session) error { + if session == nil { + return fmt.Errorf("session must not be nil") + } + if session.ID == "" { + return fmt.Errorf("session ID must not be empty") + } + if _, loaded := r.sessions.GetOrSet(session.ID, session); loaded { + return fmt.Errorf("session %q already exists", session.ID) + } + return nil +} + +// SessionGet retrieves a session by ID. +func (r *InMemoryRepository) SessionGet(_ context.Context, sessionID string) (*Session, error) { + s, ok := r.sessions.Get(sessionID) + if !ok { + return nil, fmt.Errorf("session %q not found", sessionID) + } + return s, nil +} + +// SessionUpdate replaces the stored session. Returns an error if the session does not exist. +func (r *InMemoryRepository) SessionUpdate(_ context.Context, session *Session) error { + if session == nil { + return fmt.Errorf("session must not be nil") + } + if _, ok := r.sessions.Get(session.ID); !ok { + return fmt.Errorf("session %q not found", session.ID) + } + r.sessions.Set(session.ID, session) + return nil +} + +// SessionDelete removes a session by ID. +func (r *InMemoryRepository) SessionDelete(_ context.Context, sessionID string) error { + if _, ok := r.sessions.Get(sessionID); !ok { + return fmt.Errorf("session %q not found", sessionID) + } + r.sessions.Del(sessionID) + return nil +} + +// SessionList returns all sessions that match the given filter. +func (r *InMemoryRepository) SessionList(_ context.Context, filter SessionFilter) ([]*Session, error) { + var result []*Session + r.sessions.ForEach(func(_ string, s *Session) bool { + if filter.PayerAddress != nil && s.PayerAddress != *filter.PayerAddress { + return true + } + if filter.Status != nil && s.Status != *filter.Status { + return true + } + if filter.CreatedAfter != nil && !s.CreatedAt.After(*filter.CreatedAfter) { + return true + } + result = append(result, s) + return true + }) + return result, nil +} + +// SessionGetByPayer returns all sessions for the given payer address. +func (r *InMemoryRepository) SessionGetByPayer(ctx context.Context, payer string) ([]*Session, error) { + return r.SessionList(ctx, SessionFilter{PayerAddress: &payer}) +} + +// --- Worker management --- + +// WorkerCreate stores a new worker. Returns an error if the worker key already exists. +func (r *InMemoryRepository) WorkerCreate(_ context.Context, worker *Worker) error { + if worker == nil { + return fmt.Errorf("worker must not be nil") + } + if worker.Key == "" { + return fmt.Errorf("worker key must not be empty") + } + if _, loaded := r.workers.GetOrSet(worker.Key, worker); loaded { + return fmt.Errorf("worker %q already exists", worker.Key) + } + return nil +} + +// WorkerGet retrieves a worker by its key. +func (r *InMemoryRepository) WorkerGet(_ context.Context, workerKey string) (*Worker, error) { + w, ok := r.workers.Get(workerKey) + if !ok { + return nil, fmt.Errorf("worker %q not found", workerKey) + } + return w, nil +} + +// WorkerDelete removes a worker by its key. +func (r *InMemoryRepository) WorkerDelete(_ context.Context, workerKey string) error { + if _, ok := r.workers.Get(workerKey); !ok { + return fmt.Errorf("worker %q not found", workerKey) + } + r.workers.Del(workerKey) + return nil +} + +// WorkerListBySession returns all workers associated with a session. 
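+// Note that this is a linear scan over the whole worker map; at sidecar
+// scale that is cheap, but a persistent implementation would likely want
+// an index on SessionID.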
+func (r *InMemoryRepository) WorkerListBySession(_ context.Context, sessionID string) ([]*Worker, error) {
+	var result []*Worker
+	r.workers.ForEach(func(_ string, w *Worker) bool {
+		if w.SessionID == sessionID {
+			result = append(result, w)
+		}
+		return true
+	})
+	return result, nil
+}
+
+// WorkerCountByPayer returns the number of active workers for a given payer address.
+func (r *InMemoryRepository) WorkerCountByPayer(_ context.Context, payer string) (int, error) {
+	count := 0
+	r.workers.ForEach(func(_ string, w *Worker) bool {
+		if w.PayerAddress == payer {
+			count++
+		}
+		return true
+	})
+	return count, nil
+}
+
+// --- Quota management ---
+
+// QuotaGet returns the current quota usage for a payer. Returns a zero-value
+// QuotaUsage (not an error) when no entry exists yet. The returned value is
+// a copy, so callers never observe concurrent counter updates mid-read.
+func (r *InMemoryRepository) QuotaGet(_ context.Context, payer string) (*QuotaUsage, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	q, ok := r.quotas.Get(payer)
+	if !ok {
+		return &QuotaUsage{
+			PayerAddress: payer,
+			LastUpdated:  time.Now(),
+		}, nil
+	}
+	cp := *q
+	return &cp, nil
+}
+
+// QuotaIncrement increments the quota counters for a payer. The
+// read-modify-write is serialized on the repository mutex, since haxmap
+// alone cannot update the composite QuotaUsage value atomically.
+func (r *InMemoryRepository) QuotaIncrement(_ context.Context, payer string, sessions int, workers int) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	q, _ := r.quotas.GetOrCompute(payer, func() *QuotaUsage {
+		return &QuotaUsage{PayerAddress: payer}
+	})
+	q.ActiveSessions += sessions
+	q.ActiveWorkers += workers
+	q.LastUpdated = time.Now()
+	r.quotas.Set(payer, q)
+	return nil
+}
+
+// QuotaDecrement decrements the quota counters for a payer under the same
+// mutex as QuotaIncrement. Counters are clamped to zero to prevent underflow.
+func (r *InMemoryRepository) QuotaDecrement(_ context.Context, payer string, sessions int, workers int) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	q, ok := r.quotas.Get(payer)
+	if !ok {
+		return nil
+	}
+	q.ActiveSessions -= sessions
+	if q.ActiveSessions < 0 {
+		q.ActiveSessions = 0
+	}
+	q.ActiveWorkers -= workers
+	if q.ActiveWorkers < 0 {
+		q.ActiveWorkers = 0
+	}
+	q.LastUpdated = time.Now()
+	r.quotas.Set(payer, q)
+	return nil
+}
+
+// --- Usage accumulation ---
+
+// UsageAdd appends a usage event to the session's usage log.
+func (r *InMemoryRepository) UsageAdd(_ context.Context, sessionID string, usage *UsageEvent) error {
+	if usage == nil {
+		return fmt.Errorf("usage event must not be nil")
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	events, _ := r.usage.GetOrCompute(sessionID, func() []*UsageEvent {
+		return make([]*UsageEvent, 0, 8)
+	})
+	events = append(events, usage)
+	r.usage.Set(sessionID, events)
+	return nil
+}
+
+// UsageGetTotal returns a summary of all usage events for a session.
+func (r *InMemoryRepository) UsageGetTotal(_ context.Context, sessionID string) (*UsageSummary, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	events, ok := r.usage.Get(sessionID)
+	if !ok {
+		return &UsageSummary{}, nil
+	}
+
+	summary := &UsageSummary{}
+	for _, e := range events {
+		summary.TotalBlocks += e.Blocks
+		summary.TotalBytes += e.Bytes
+		summary.TotalRequests += e.Requests
+	}
+	return summary, nil
+}
+
+// --- Health/lifecycle ---
+
+// Ping is a no-op health check for the in-memory implementation.
+func (r *InMemoryRepository) Ping(_ context.Context) error {
+	return nil
+}
+
+// Close is a no-op for the in-memory implementation.
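+// A persistent implementation (for example one backed by Postgres or Redis)
+// would release its connection pool here instead.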
+func (r *InMemoryRepository) Close() error { + return nil +} diff --git a/provider/repository/inmemory_test.go b/provider/repository/inmemory_test.go new file mode 100644 index 0000000..c33e2db --- /dev/null +++ b/provider/repository/inmemory_test.go @@ -0,0 +1,403 @@ +package repository_test + +import ( + "context" + "testing" + "time" + + "github.com/graphprotocol/substreams-data-service/provider/repository" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newTestSession(id, payer string) *repository.Session { + return &repository.Session{ + ID: id, + PayerAddress: payer, + Status: repository.SessionStatusActive, + CreatedAt: time.Now(), + } +} + +func newTestWorker(key, sessionID, payer string) *repository.Worker { + return &repository.Worker{ + Key: key, + SessionID: sessionID, + PayerAddress: payer, + CreatedAt: time.Now(), + } +} + +// --- Session tests --- + +func TestInMemory_SessionCreate_Get(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s := newTestSession("s1", "0xpayer1") + require.NoError(t, repo.SessionCreate(ctx, s)) + + got, err := repo.SessionGet(ctx, "s1") + require.NoError(t, err) + assert.Equal(t, "s1", got.ID) + assert.Equal(t, "0xpayer1", got.PayerAddress) +} + +func TestInMemory_SessionCreate_Duplicate(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s := newTestSession("s1", "0xpayer1") + require.NoError(t, repo.SessionCreate(ctx, s)) + + err := repo.SessionCreate(ctx, s) + require.Error(t, err) + assert.Contains(t, err.Error(), "already exists") +} + +func TestInMemory_SessionCreate_NilAndEmpty(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.Error(t, repo.SessionCreate(ctx, nil)) + require.Error(t, repo.SessionCreate(ctx, &repository.Session{ID: ""})) +} + +func TestInMemory_SessionGet_NotFound(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + _, err := repo.SessionGet(ctx, "missing") + require.Error(t, err) + assert.Contains(t, err.Error(), "not found") +} + +func TestInMemory_SessionUpdate(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s := newTestSession("s1", "0xpayer1") + require.NoError(t, repo.SessionCreate(ctx, s)) + + updated := *s + updated.Status = repository.SessionStatusTerminated + require.NoError(t, repo.SessionUpdate(ctx, &updated)) + + got, err := repo.SessionGet(ctx, "s1") + require.NoError(t, err) + assert.Equal(t, repository.SessionStatusTerminated, got.Status) +} + +func TestInMemory_SessionUpdate_NotFound(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s := newTestSession("missing", "0xpayer1") + require.Error(t, repo.SessionUpdate(ctx, s)) +} + +func TestInMemory_SessionDelete(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s := newTestSession("s1", "0xpayer1") + require.NoError(t, repo.SessionCreate(ctx, s)) + require.NoError(t, repo.SessionDelete(ctx, "s1")) + + _, err := repo.SessionGet(ctx, "s1") + require.Error(t, err) +} + +func TestInMemory_SessionDelete_NotFound(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.Error(t, repo.SessionDelete(ctx, "missing")) +} + +func TestInMemory_SessionList_NoFilter(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, 
repo.SessionCreate(ctx, newTestSession("s1", "0xpayer1"))) + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s2", "0xpayer2"))) + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s3", "0xpayer1"))) + + all, err := repo.SessionList(ctx, repository.SessionFilter{}) + require.NoError(t, err) + assert.Len(t, all, 3) +} + +func TestInMemory_SessionList_ByPayer(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s1", "0xpayer1"))) + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s2", "0xpayer2"))) + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s3", "0xpayer1"))) + + payer := "0xpayer1" + sessions, err := repo.SessionList(ctx, repository.SessionFilter{PayerAddress: &payer}) + require.NoError(t, err) + assert.Len(t, sessions, 2) + for _, s := range sessions { + assert.Equal(t, "0xpayer1", s.PayerAddress) + } +} + +func TestInMemory_SessionList_ByStatus(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + s1 := newTestSession("s1", "0xpayer1") + s2 := newTestSession("s2", "0xpayer1") + s2.Status = repository.SessionStatusTerminated + + require.NoError(t, repo.SessionCreate(ctx, s1)) + require.NoError(t, repo.SessionCreate(ctx, s2)) + + status := repository.SessionStatusActive + active, err := repo.SessionList(ctx, repository.SessionFilter{Status: &status}) + require.NoError(t, err) + assert.Len(t, active, 1) + assert.Equal(t, "s1", active[0].ID) +} + +func TestInMemory_SessionList_ByCreatedAfter(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + past := time.Now().Add(-time.Hour) + future := time.Now().Add(time.Hour) + + s1 := newTestSession("s1", "0xpayer1") + s1.CreatedAt = time.Now().Add(-30 * time.Minute) // between past and future + s2 := newTestSession("s2", "0xpayer1") + s2.CreatedAt = time.Now().Add(-2 * time.Hour) // before past + + require.NoError(t, repo.SessionCreate(ctx, s1)) + require.NoError(t, repo.SessionCreate(ctx, s2)) + + _ = future + sessions, err := repo.SessionList(ctx, repository.SessionFilter{CreatedAfter: &past}) + require.NoError(t, err) + assert.Len(t, sessions, 1) + assert.Equal(t, "s1", sessions[0].ID) +} + +func TestInMemory_SessionGetByPayer(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s1", "0xpayer1"))) + require.NoError(t, repo.SessionCreate(ctx, newTestSession("s2", "0xpayer2"))) + + sessions, err := repo.SessionGetByPayer(ctx, "0xpayer1") + require.NoError(t, err) + require.Len(t, sessions, 1) + assert.Equal(t, "s1", sessions[0].ID) +} + +// --- Worker tests --- + +func TestInMemory_WorkerCreate_Get(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + w := newTestWorker("w1", "s1", "0xpayer1") + require.NoError(t, repo.WorkerCreate(ctx, w)) + + got, err := repo.WorkerGet(ctx, "w1") + require.NoError(t, err) + assert.Equal(t, "w1", got.Key) + assert.Equal(t, "s1", got.SessionID) +} + +func TestInMemory_WorkerCreate_Duplicate(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + w := newTestWorker("w1", "s1", "0xpayer1") + require.NoError(t, repo.WorkerCreate(ctx, w)) + require.Error(t, repo.WorkerCreate(ctx, w)) +} + +func TestInMemory_WorkerGet_NotFound(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + 
+ _, err := repo.WorkerGet(ctx, "missing") + require.Error(t, err) +} + +func TestInMemory_WorkerDelete(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + w := newTestWorker("w1", "s1", "0xpayer1") + require.NoError(t, repo.WorkerCreate(ctx, w)) + require.NoError(t, repo.WorkerDelete(ctx, "w1")) + + _, err := repo.WorkerGet(ctx, "w1") + require.Error(t, err) +} + +func TestInMemory_WorkerListBySession(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w1", "s1", "0xpayer1"))) + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w2", "s1", "0xpayer1"))) + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w3", "s2", "0xpayer2"))) + + workers, err := repo.WorkerListBySession(ctx, "s1") + require.NoError(t, err) + assert.Len(t, workers, 2) + + workers2, err := repo.WorkerListBySession(ctx, "s2") + require.NoError(t, err) + assert.Len(t, workers2, 1) +} + +func TestInMemory_WorkerCountByPayer(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w1", "s1", "0xpayer1"))) + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w2", "s1", "0xpayer1"))) + require.NoError(t, repo.WorkerCreate(ctx, newTestWorker("w3", "s2", "0xpayer2"))) + + count, err := repo.WorkerCountByPayer(ctx, "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 2, count) + + count2, err := repo.WorkerCountByPayer(ctx, "0xpayer3") + require.NoError(t, err) + assert.Equal(t, 0, count2) +} + +// --- Quota tests --- + +func TestInMemory_QuotaGet_NewPayer(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + q, err := repo.QuotaGet(ctx, "0xnewpayer") + require.NoError(t, err) + assert.NotNil(t, q) + assert.Equal(t, "0xnewpayer", q.PayerAddress) + assert.Equal(t, 0, q.ActiveSessions) + assert.Equal(t, 0, q.ActiveWorkers) +} + +func TestInMemory_QuotaIncrement(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.QuotaIncrement(ctx, "0xpayer1", 1, 2)) + + q, err := repo.QuotaGet(ctx, "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 1, q.ActiveSessions) + assert.Equal(t, 2, q.ActiveWorkers) + + require.NoError(t, repo.QuotaIncrement(ctx, "0xpayer1", 0, 1)) + + q, err = repo.QuotaGet(ctx, "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 1, q.ActiveSessions) + assert.Equal(t, 3, q.ActiveWorkers) +} + +func TestInMemory_QuotaDecrement(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.QuotaIncrement(ctx, "0xpayer1", 3, 5)) + require.NoError(t, repo.QuotaDecrement(ctx, "0xpayer1", 1, 2)) + + q, err := repo.QuotaGet(ctx, "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 2, q.ActiveSessions) + assert.Equal(t, 3, q.ActiveWorkers) +} + +func TestInMemory_QuotaDecrement_Clamps(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + require.NoError(t, repo.QuotaIncrement(ctx, "0xpayer1", 1, 1)) + require.NoError(t, repo.QuotaDecrement(ctx, "0xpayer1", 5, 5)) + + q, err := repo.QuotaGet(ctx, "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 0, q.ActiveSessions) + assert.Equal(t, 0, q.ActiveWorkers) +} + +func TestInMemory_QuotaDecrement_NoPayer(t *testing.T) { + repo := repository.NewInMemoryRepository() + ctx := context.Background() + + // No error 
when payer does not exist yet
+	require.NoError(t, repo.QuotaDecrement(ctx, "0xunknown", 1, 1))
+}
+
+// --- Usage tests ---
+
+func TestInMemory_UsageAdd_GetTotal(t *testing.T) {
+	repo := repository.NewInMemoryRepository()
+	ctx := context.Background()
+
+	require.NoError(t, repo.UsageAdd(ctx, "s1", &repository.UsageEvent{Blocks: 10, Bytes: 200, Requests: 3}))
+	require.NoError(t, repo.UsageAdd(ctx, "s1", &repository.UsageEvent{Blocks: 5, Bytes: 100, Requests: 1}))
+
+	total, err := repo.UsageGetTotal(ctx, "s1")
+	require.NoError(t, err)
+	assert.Equal(t, int64(15), total.TotalBlocks)
+	assert.Equal(t, int64(300), total.TotalBytes)
+	assert.Equal(t, int64(4), total.TotalRequests)
+}
+
+func TestInMemory_UsageGetTotal_Empty(t *testing.T) {
+	repo := repository.NewInMemoryRepository()
+	ctx := context.Background()
+
+	total, err := repo.UsageGetTotal(ctx, "nonexistent")
+	require.NoError(t, err)
+	assert.NotNil(t, total)
+	assert.Equal(t, int64(0), total.TotalBlocks)
+}
+
+func TestInMemory_UsageAdd_NilEvent(t *testing.T) {
+	repo := repository.NewInMemoryRepository()
+	ctx := context.Background()
+
+	require.Error(t, repo.UsageAdd(ctx, "s1", nil))
+}
+
+func TestInMemory_UsageAdd_MultipleSessionsIsolated(t *testing.T) {
+	repo := repository.NewInMemoryRepository()
+	ctx := context.Background()
+
+	require.NoError(t, repo.UsageAdd(ctx, "s1", &repository.UsageEvent{Blocks: 10}))
+	require.NoError(t, repo.UsageAdd(ctx, "s2", &repository.UsageEvent{Blocks: 20}))
+
+	total1, err := repo.UsageGetTotal(ctx, "s1")
+	require.NoError(t, err)
+	assert.Equal(t, int64(10), total1.TotalBlocks)
+
+	total2, err := repo.UsageGetTotal(ctx, "s2")
+	require.NoError(t, err)
+	assert.Equal(t, int64(20), total2.TotalBlocks)
+}
+
+// --- Ping / Close ---
+
+func TestInMemory_PingAndClose(t *testing.T) {
+	repo := repository.NewInMemoryRepository()
+	ctx := context.Background()
+
+	require.NoError(t, repo.Ping(ctx))
+	require.NoError(t, repo.Close())
+}
diff --git a/provider/repository/log_test.go b/provider/repository/log_test.go
new file mode 100644
index 0000000..6deb6e8
--- /dev/null
+++ b/provider/repository/log_test.go
@@ -0,0 +1,11 @@
+package repository_test
+
+import (
+	"github.com/streamingfast/logging"
+)
+
+var zlogTest, _ = logging.PackageLogger("repository_test", "github.com/graphprotocol/substreams-data-service/provider/repository/tests")
+
+func init() {
+	logging.InstantiateLoggers()
+}
diff --git a/provider/repository/repository.go b/provider/repository/repository.go
new file mode 100644
index 0000000..94f3065
--- /dev/null
+++ b/provider/repository/repository.go
@@ -0,0 +1,98 @@
+package repository
+
+import (
+	"context"
+	"time"
+)
+
+// GlobalRepository provides global state storage for live session/worker tracking.
+// All methods are namespaced by domain (Session*, Worker*, Quota*, Usage*).
+// All implementations must be safe for concurrent use.
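+//
+// InMemoryRepository is the reference backend; the interface is
+// deliberately context-first so that a shared store could later back
+// multi-instance deployments without touching callers.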
+type GlobalRepository interface { + // Session management + SessionCreate(ctx context.Context, session *Session) error + SessionGet(ctx context.Context, sessionID string) (*Session, error) + SessionUpdate(ctx context.Context, session *Session) error + SessionDelete(ctx context.Context, sessionID string) error + SessionList(ctx context.Context, filter SessionFilter) ([]*Session, error) + SessionGetByPayer(ctx context.Context, payer string) ([]*Session, error) + + // Worker/connection tracking within sessions + WorkerCreate(ctx context.Context, worker *Worker) error + WorkerGet(ctx context.Context, workerKey string) (*Worker, error) + WorkerDelete(ctx context.Context, workerKey string) error + WorkerListBySession(ctx context.Context, sessionID string) ([]*Worker, error) + WorkerCountByPayer(ctx context.Context, payer string) (int, error) + + // Quota tracking + QuotaGet(ctx context.Context, payer string) (*QuotaUsage, error) + QuotaIncrement(ctx context.Context, payer string, sessions int, workers int) error + QuotaDecrement(ctx context.Context, payer string, sessions int, workers int) error + + // Usage accumulation (for metering) + UsageAdd(ctx context.Context, sessionID string, usage *UsageEvent) error + UsageGetTotal(ctx context.Context, sessionID string) (*UsageSummary, error) + + // Health/lifecycle + Ping(ctx context.Context) error + Close() error +} + +// SessionStatus represents the lifecycle state of a session. +type SessionStatus string + +const ( + SessionStatusActive SessionStatus = "active" + SessionStatusTerminated SessionStatus = "terminated" +) + +// Session represents an active or terminated payment/streaming session. +type Session struct { + ID string + PayerAddress string + SignerAddress string + ServiceProvider string + CreatedAt time.Time + LastKeepAlive time.Time + Status SessionStatus + Metadata map[string]string +} + +// Worker represents a single streaming connection (worker) within a session. +type Worker struct { + Key string + SessionID string + PayerAddress string + CreatedAt time.Time + TraceID string +} + +// QuotaUsage tracks the current quota consumption for a payer address. +type QuotaUsage struct { + PayerAddress string + ActiveSessions int + ActiveWorkers int + LastUpdated time.Time +} + +// UsageEvent represents a single metered usage event within a session. +type UsageEvent struct { + Timestamp time.Time + Blocks int64 + Bytes int64 + Requests int64 +} + +// UsageSummary aggregates total usage across all events for a session. +type UsageSummary struct { + TotalBlocks int64 + TotalBytes int64 + TotalRequests int64 +} + +// SessionFilter specifies criteria for filtering sessions in a list operation. +type SessionFilter struct { + PayerAddress *string + Status *SessionStatus + CreatedAfter *time.Time +} diff --git a/provider/session/log_test.go b/provider/session/log_test.go new file mode 100644 index 0000000..8a14427 --- /dev/null +++ b/provider/session/log_test.go @@ -0,0 +1,11 @@ +package session_test + +import ( + "github.com/streamingfast/logging" +) + +var zlogTest, _ = logging.PackageLogger("session_test", "github.com/graphprotocol/substreams-data-service/provider/session/tests") + +func init() { + logging.InstantiateLoggers() +} diff --git a/provider/session/quotas.go b/provider/session/quotas.go new file mode 100644 index 0000000..c9be770 --- /dev/null +++ b/provider/session/quotas.go @@ -0,0 +1,50 @@ +package session + +// QuotaConfig holds the configured quota limits for the session service. 
+// These can be loaded from the provider config file or set programmatically.
+type QuotaConfig struct {
+	// DefaultMaxConcurrentSessions is the default maximum number of concurrent
+	// sessions allowed per payer when no per-payer override exists.
+	DefaultMaxConcurrentSessions int
+
+	// DefaultMaxWorkersPerSession is the default maximum number of concurrent
+	// workers (streaming connections) allowed per session.
+	DefaultMaxWorkersPerSession int
+
+	// PerPayerOverrides maps a payer address (lowercase hex 0x...) to its
+	// specific quota limits. Overrides take precedence over the defaults.
+	PerPayerOverrides map[string]*PayerQuota
+}
+
+// PayerQuota holds per-payer quota overrides.
+type PayerQuota struct {
+	MaxConcurrentSessions int
+	MaxWorkersPerSession  int
+}
+
+// DefaultQuotaConfig returns a sensible default QuotaConfig.
+func DefaultQuotaConfig() *QuotaConfig {
+	return &QuotaConfig{
+		DefaultMaxConcurrentSessions: 10,
+		DefaultMaxWorkersPerSession:  5,
+		PerPayerOverrides:            make(map[string]*PayerQuota),
+	}
+}
+
+// MaxConcurrentSessions returns the effective maximum number of concurrent
+// sessions for the given payer address.
+func (c *QuotaConfig) MaxConcurrentSessions(payer string) int {
+	if override, ok := c.PerPayerOverrides[payer]; ok {
+		return override.MaxConcurrentSessions
+	}
+	return c.DefaultMaxConcurrentSessions
+}
+
+// MaxWorkersPerSession returns the effective maximum number of workers
+// per session for the given payer address.
+func (c *QuotaConfig) MaxWorkersPerSession(payer string) int {
+	if override, ok := c.PerPayerOverrides[payer]; ok {
+		return override.MaxWorkersPerSession
+	}
+	return c.DefaultMaxWorkersPerSession
+}
diff --git a/provider/session/service.go b/provider/session/service.go
new file mode 100644
index 0000000..c27bc60
--- /dev/null
+++ b/provider/session/service.go
@@ -0,0 +1,242 @@
+// Package session implements the gRPC SessionService that manages worker-pool
+// slots for the dsession sds:// plugin used by firehose-core tier1.
+package session
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"connectrpc.com/connect"
+	sessionv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1"
+	"github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect"
+	"github.com/graphprotocol/substreams-data-service/provider/repository"
+	"github.com/streamingfast/logging"
+	"go.uber.org/zap"
+)
+
+var zlog, _ = logging.PackageLogger("sds_session", "github.com/graphprotocol/substreams-data-service/provider/session")
+
+// SessionService implements sessionv1connect.SessionServiceHandler.
+// It manages worker-pool slots and enforces per-payer quotas.
+type SessionService struct {
+	repo   repository.GlobalRepository
+	quotas *QuotaConfig
+}
+
+var _ sessionv1connect.SessionServiceHandler = (*SessionService)(nil)
+
+// NewSessionService creates a new SessionService.
+// If quotas is nil, DefaultQuotaConfig() is used.
+func NewSessionService(repo repository.GlobalRepository, quotas *QuotaConfig) *SessionService {
+	if quotas == nil {
+		quotas = DefaultQuotaConfig()
+	}
+	return &SessionService{repo: repo, quotas: quotas}
+}
+
+// BorrowWorker acquires a worker slot for a new streaming request.
+//
+// - A session is created on demand for the (payer, trace_id) pair (workers
+//   and sessions are tracked together in the in-memory model; the dsession
+//   protocol is one worker == one connection).
+// - Returns RESOURCE_EXHAUSTED if the payer's quota is exceeded. +func (s *SessionService) BorrowWorker( + ctx context.Context, + req *connect.Request[sessionv1.BorrowWorkerRequest], +) (*connect.Response[sessionv1.BorrowWorkerResponse], error) { + payer := req.Msg.OrganizationId + traceID := req.Msg.TraceId + + if payer == "" { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("organization_id is required")) + } + + zlog.Debug("BorrowWorker called", + zap.String("payer", payer), + zap.String("trace_id", traceID), + zap.String("service", req.Msg.Service), + ) + + // Check current quota usage. + quota, err := s.repo.QuotaGet(ctx, payer) + if err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("reading quota: %w", err)) + } + + maxWorkers := s.quotas.MaxWorkersPerSession(payer) * s.quotas.MaxConcurrentSessions(payer) + + if quota.ActiveWorkers >= maxWorkers { + zlog.Warn("quota exceeded for payer", + zap.String("payer", payer), + zap.Int("active_workers", quota.ActiveWorkers), + zap.Int("max_workers", maxWorkers), + ) + return connect.NewResponse(&sessionv1.BorrowWorkerResponse{ + Status: sessionv1.BorrowStatus_BORROW_STATUS_RESOURCE_EXHAUSTED, + WorkerState: &sessionv1.WorkerState{ + MaxWorkers: int64(maxWorkers), + ActiveWorkers: int64(quota.ActiveWorkers), + }, + }), nil + } + + // Create a session for this worker if trace_id implies one (one-to-one mapping). + sessionID := buildSessionID(payer, traceID) + + // Ensure session exists. + if _, getErr := s.repo.SessionGet(ctx, sessionID); getErr != nil { + newSession := &repository.Session{ + ID: sessionID, + PayerAddress: payer, + Status: repository.SessionStatusActive, + CreatedAt: time.Now(), + LastKeepAlive: time.Now(), + } + if createErr := s.repo.SessionCreate(ctx, newSession); createErr != nil { + // A concurrent BorrowWorker may have created the session first; that's fine. + zlog.Debug("session already exists or create failed", + zap.String("session_id", sessionID), + zap.Error(createErr), + ) + } + } + + // Create the worker entry. + workerKey := buildWorkerKey(payer, traceID, time.Now()) + worker := &repository.Worker{ + Key: workerKey, + SessionID: sessionID, + PayerAddress: payer, + CreatedAt: time.Now(), + TraceID: traceID, + } + if err := s.repo.WorkerCreate(ctx, worker); err != nil { + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("creating worker: %w", err)) + } + + // Increment quota. + if err := s.repo.QuotaIncrement(ctx, payer, 0, 1); err != nil { + // Non-fatal: log and continue (quota is eventually consistent in the in-memory model). + zlog.Warn("failed to increment quota", zap.String("payer", payer), zap.Error(err)) + } + + zlog.Debug("worker borrowed", + zap.String("worker_key", workerKey), + zap.String("session_id", sessionID), + zap.String("payer", payer), + ) + + return connect.NewResponse(&sessionv1.BorrowWorkerResponse{ + WorkerKey: workerKey, + Status: sessionv1.BorrowStatus_BORROW_STATUS_BORROWED, + WorkerState: &sessionv1.WorkerState{ + MaxWorkers: int64(maxWorkers), + ActiveWorkers: int64(quota.ActiveWorkers + 1), + }, + }), nil +} + +// ReturnWorker releases a previously borrowed worker slot. 
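+//
+// When the request carries minimal_worker_life_duration, the handler delays
+// acknowledging the return until the worker has lived at least that long,
+// which dampens rapid borrow/return churn from reconnecting clients.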
+func (s *SessionService) ReturnWorker( + ctx context.Context, + req *connect.Request[sessionv1.ReturnWorkerRequest], +) (*connect.Response[sessionv1.ReturnWorkerResponse], error) { + workerKey := req.Msg.WorkerKey + if workerKey == "" { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("worker_key is required")) + } + + zlog.Debug("ReturnWorker called", zap.String("worker_key", workerKey)) + + // Look up the worker to find the payer. + worker, err := s.repo.WorkerGet(ctx, workerKey) + if err != nil { + // Worker not found - may have already been returned. + zlog.Warn("worker not found for return", zap.String("worker_key", workerKey), zap.Error(err)) + return connect.NewResponse(&sessionv1.ReturnWorkerResponse{}), nil + } + + payer := worker.PayerAddress + + // Honor minimal_worker_life_duration: if the worker has not been alive + // long enough we simply wait before acknowledging the return. + if req.Msg.MinimalWorkerLifeDuration != nil { + minDuration := req.Msg.MinimalWorkerLifeDuration.AsDuration() + elapsed := time.Since(worker.CreatedAt) + if elapsed < minDuration { + remaining := minDuration - elapsed + zlog.Debug("waiting for minimal worker life duration", + zap.String("worker_key", workerKey), + zap.Duration("remaining", remaining), + ) + select { + case <-time.After(remaining): + case <-ctx.Done(): + return nil, connect.NewError(connect.CodeDeadlineExceeded, ctx.Err()) + } + } + } + + // Delete the worker. + if err := s.repo.WorkerDelete(ctx, workerKey); err != nil { + zlog.Warn("failed to delete worker", zap.String("worker_key", workerKey), zap.Error(err)) + } + + // Decrement quota. + if err := s.repo.QuotaDecrement(ctx, payer, 0, 1); err != nil { + zlog.Warn("failed to decrement quota", zap.String("payer", payer), zap.Error(err)) + } + + zlog.Debug("worker returned", zap.String("worker_key", workerKey), zap.String("payer", payer)) + + return connect.NewResponse(&sessionv1.ReturnWorkerResponse{}), nil +} + +// KeepAlive refreshes the session's last-seen timestamp so it is not +// garbage-collected by background cleanup routines. +func (s *SessionService) KeepAlive( + ctx context.Context, + req *connect.Request[sessionv1.KeepAliveRequest], +) (*connect.Response[sessionv1.KeepAliveResponse], error) { + workerKey := req.Msg.WorkerKey + if workerKey == "" { + return nil, connect.NewError(connect.CodeInvalidArgument, fmt.Errorf("worker_key is required")) + } + + zlog.Debug("KeepAlive called", zap.String("worker_key", workerKey)) + + worker, err := s.repo.WorkerGet(ctx, workerKey) + if err != nil { + // Worker not found is non-fatal; the session may have been cleaned up. + return connect.NewResponse(&sessionv1.KeepAliveResponse{}), nil + } + + session, err := s.repo.SessionGet(ctx, worker.SessionID) + if err != nil { + return connect.NewResponse(&sessionv1.KeepAliveResponse{}), nil + } + + session.LastKeepAlive = time.Now() + if updateErr := s.repo.SessionUpdate(ctx, session); updateErr != nil { + zlog.Warn("failed to update session keep-alive", + zap.String("session_id", session.ID), + zap.Error(updateErr), + ) + } + + return connect.NewResponse(&sessionv1.KeepAliveResponse{}), nil +} + +// buildSessionID constructs a stable session ID for a (payer, traceID) pair. +func buildSessionID(payer, traceID string) string { + if traceID != "" { + return fmt.Sprintf("%s|%s", payer, traceID) + } + return payer +} + +// buildWorkerKey constructs a unique worker key. 
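+// The key concatenates the payer, the trace ID and the creation time in
+// nanoseconds, for example (illustrative values):
+//
+//	0xpayer1|trace-001|1719500000000000000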
+func buildWorkerKey(payer, traceID string, createdAt time.Time) string { + return fmt.Sprintf("%s|%s|%d", payer, traceID, createdAt.UnixNano()) +} diff --git a/provider/session/service_test.go b/provider/session/service_test.go new file mode 100644 index 0000000..29d8e16 --- /dev/null +++ b/provider/session/service_test.go @@ -0,0 +1,215 @@ +package session_test + +import ( + "context" + "testing" + + "connectrpc.com/connect" + sessionv1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1" + "github.com/graphprotocol/substreams-data-service/provider/repository" + "github.com/graphprotocol/substreams-data-service/provider/session" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newTestService(quotas *session.QuotaConfig) (*session.SessionService, *repository.InMemoryRepository) { + repo := repository.NewInMemoryRepository() + svc := session.NewSessionService(repo, quotas) + return svc, repo +} + +// --- BorrowWorker --- + +func TestSessionService_BorrowWorker_Success(t *testing.T) { + svc, repo := newTestService(nil) + + resp, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: "substreams", + OrganizationId: "0xpayer1", + TraceId: "trace-001", + })) + require.NoError(t, err) + assert.Equal(t, sessionv1.BorrowStatus_BORROW_STATUS_BORROWED, resp.Msg.Status) + assert.NotEmpty(t, resp.Msg.WorkerKey) + + // Quota should have been incremented. + quota, err := repo.QuotaGet(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 1, quota.ActiveWorkers) +} + +func TestSessionService_BorrowWorker_MissingOrganizationId(t *testing.T) { + svc, _ := newTestService(nil) + + _, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: "substreams", + TraceId: "trace-001", + })) + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeInvalidArgument, connectErr.Code()) +} + +func TestSessionService_BorrowWorker_QuotaExceeded(t *testing.T) { + // Create a config with maxSessions=1, maxWorkers=1 → effective max = 1 worker. + quotas := &session.QuotaConfig{ + DefaultMaxConcurrentSessions: 1, + DefaultMaxWorkersPerSession: 1, + PerPayerOverrides: make(map[string]*session.PayerQuota), + } + svc, _ := newTestService(quotas) + + // Borrow first worker - should succeed. + resp1, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: "substreams", + OrganizationId: "0xpayer1", + TraceId: "trace-001", + })) + require.NoError(t, err) + assert.Equal(t, sessionv1.BorrowStatus_BORROW_STATUS_BORROWED, resp1.Msg.Status) + + // Borrow second worker - should be exhausted. + resp2, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: "substreams", + OrganizationId: "0xpayer1", + TraceId: "trace-002", + })) + require.NoError(t, err) + assert.Equal(t, sessionv1.BorrowStatus_BORROW_STATUS_RESOURCE_EXHAUSTED, resp2.Msg.Status) +} + +func TestSessionService_BorrowWorker_PerPayerOverride(t *testing.T) { + // Default = 1 worker but payer1 has 5 workers override. 
+ quotas := &session.QuotaConfig{ + DefaultMaxConcurrentSessions: 1, + DefaultMaxWorkersPerSession: 1, + PerPayerOverrides: map[string]*session.PayerQuota{ + "0xpayer1": {MaxConcurrentSessions: 5, MaxWorkersPerSession: 2}, + }, + } + svc, _ := newTestService(quotas) + + // Should be able to borrow multiple workers for payer1 (10 max). + for i := range 5 { + resp, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + Service: "substreams", + OrganizationId: "0xpayer1", + TraceId: "trace-" + string(rune('0'+i)), + })) + require.NoError(t, err) + assert.Equal(t, sessionv1.BorrowStatus_BORROW_STATUS_BORROWED, resp.Msg.Status) + } +} + +// --- ReturnWorker --- + +func TestSessionService_ReturnWorker_Success(t *testing.T) { + svc, repo := newTestService(nil) + + borrowResp, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + OrganizationId: "0xpayer1", + TraceId: "trace-001", + })) + require.NoError(t, err) + workerKey := borrowResp.Msg.WorkerKey + + // Return it. + _, err = svc.ReturnWorker(context.Background(), connect.NewRequest(&sessionv1.ReturnWorkerRequest{ + WorkerKey: workerKey, + })) + require.NoError(t, err) + + // Quota should be back to 0. + quota, err := repo.QuotaGet(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, 0, quota.ActiveWorkers) +} + +func TestSessionService_ReturnWorker_MissingKey(t *testing.T) { + svc, _ := newTestService(nil) + + _, err := svc.ReturnWorker(context.Background(), connect.NewRequest(&sessionv1.ReturnWorkerRequest{})) + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeInvalidArgument, connectErr.Code()) +} + +func TestSessionService_ReturnWorker_UnknownKey(t *testing.T) { + svc, _ := newTestService(nil) + + // Returning an unknown key is non-fatal. + _, err := svc.ReturnWorker(context.Background(), connect.NewRequest(&sessionv1.ReturnWorkerRequest{ + WorkerKey: "nonexistent-key", + })) + require.NoError(t, err) +} + +// --- KeepAlive --- + +func TestSessionService_KeepAlive_Success(t *testing.T) { + svc, repo := newTestService(nil) + + borrowResp, err := svc.BorrowWorker(context.Background(), connect.NewRequest(&sessionv1.BorrowWorkerRequest{ + OrganizationId: "0xpayer1", + TraceId: "trace-001", + })) + require.NoError(t, err) + workerKey := borrowResp.Msg.WorkerKey + + _, err = svc.KeepAlive(context.Background(), connect.NewRequest(&sessionv1.KeepAliveRequest{ + WorkerKey: workerKey, + })) + require.NoError(t, err) + + // Verify session LastKeepAlive was updated. + worker, err := repo.WorkerGet(context.Background(), workerKey) + require.NoError(t, err) + + sess, err := repo.SessionGet(context.Background(), worker.SessionID) + require.NoError(t, err) + assert.False(t, sess.LastKeepAlive.IsZero()) +} + +func TestSessionService_KeepAlive_MissingKey(t *testing.T) { + svc, _ := newTestService(nil) + + _, err := svc.KeepAlive(context.Background(), connect.NewRequest(&sessionv1.KeepAliveRequest{})) + require.Error(t, err) + var connectErr *connect.Error + require.ErrorAs(t, err, &connectErr) + assert.Equal(t, connect.CodeInvalidArgument, connectErr.Code()) +} + +func TestSessionService_KeepAlive_UnknownKey(t *testing.T) { + // Unknown key is non-fatal. 
+ svc, _ := newTestService(nil) + + _, err := svc.KeepAlive(context.Background(), connect.NewRequest(&sessionv1.KeepAliveRequest{ + WorkerKey: "unknown-key", + })) + require.NoError(t, err) +} + +// --- QuotaConfig --- + +func TestQuotaConfig_Defaults(t *testing.T) { + q := session.DefaultQuotaConfig() + assert.Equal(t, 10, q.MaxConcurrentSessions("0xanypayer")) + assert.Equal(t, 5, q.MaxWorkersPerSession("0xanypayer")) +} + +func TestQuotaConfig_PerPayerOverride(t *testing.T) { + q := &session.QuotaConfig{ + DefaultMaxConcurrentSessions: 10, + DefaultMaxWorkersPerSession: 5, + PerPayerOverrides: map[string]*session.PayerQuota{ + "0xvip": {MaxConcurrentSessions: 50, MaxWorkersPerSession: 20}, + }, + } + assert.Equal(t, 50, q.MaxConcurrentSessions("0xvip")) + assert.Equal(t, 20, q.MaxWorkersPerSession("0xvip")) + assert.Equal(t, 10, q.MaxConcurrentSessions("0xnormal")) + assert.Equal(t, 5, q.MaxWorkersPerSession("0xnormal")) +} diff --git a/provider/sidecar/handler_payment_session.go b/provider/sidecar/handler_payment_session.go index e6d2844..cffbcec 100644 --- a/provider/sidecar/handler_payment_session.go +++ b/provider/sidecar/handler_payment_session.go @@ -159,7 +159,7 @@ func (s *Sidecar) handleRAVSubmission( } // Verify signature - signerAddr, err := s.verifyRAVSignature(signedRAV) + signerAddr, err := signedRAV.RecoverSigner(s.domain) if err != nil { s.logger.Warn("RAV signature verification failed", zap.Error(err)) stream.Send(&providerv1.PaymentSessionResponse{ diff --git a/provider/sidecar/handler_start_session.go b/provider/sidecar/handler_start_session.go index a2e1719..3284b9b 100644 --- a/provider/sidecar/handler_start_session.go +++ b/provider/sidecar/handler_start_session.go @@ -66,7 +66,7 @@ func (s *Sidecar) StartSession( } if initialRAV != nil && initialRAV.Message != nil { // Verify signature - signerAddr, err := s.verifyRAVSignature(initialRAV) + signerAddr, err := initialRAV.RecoverSigner(s.domain) if err != nil { s.logger.Warn("failed to verify initial RAV signature", zap.Error(err)) return connect.NewResponse(&providerv1.StartSessionResponse{ diff --git a/provider/sidecar/handler_submit_rav.go b/provider/sidecar/handler_submit_rav.go index be7a351..1008de2 100644 --- a/provider/sidecar/handler_submit_rav.go +++ b/provider/sidecar/handler_submit_rav.go @@ -56,7 +56,7 @@ func (s *Sidecar) SubmitRAV( } // Verify signature - signerAddr, err := s.verifyRAVSignature(signedRAV) + signerAddr, err := signedRAV.RecoverSigner(s.domain) if err != nil { s.logger.Warn("failed to verify RAV signature", zap.Error(err)) return connect.NewResponse(&providerv1.SubmitRAVResponse{ diff --git a/provider/sidecar/handler_validate_payment.go b/provider/sidecar/handler_validate_payment.go index 83cfd91..3b2e268 100644 --- a/provider/sidecar/handler_validate_payment.go +++ b/provider/sidecar/handler_validate_payment.go @@ -33,7 +33,7 @@ func (s *Sidecar) ValidatePayment( } // Verify the signature - signerAddr, err := s.verifyRAVSignature(signedRAV) + signerAddr, err := signedRAV.RecoverSigner(s.domain) if err != nil { s.logger.Warn("failed to verify RAV signature", zap.Error(err)) return connect.NewResponse(&providerv1.ValidatePaymentResponse{ diff --git a/provider/sidecar/sidecar.go b/provider/sidecar/sidecar.go index a794873..abaeb02 100644 --- a/provider/sidecar/sidecar.go +++ b/provider/sidecar/sidecar.go @@ -4,12 +4,19 @@ import ( "context" "math/big" "net/http" - "sync" "time" "connectrpc.com/connect" + "github.com/alphadose/haxmap" 
"github.com/graphprotocol/substreams-data-service/horizon" "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/provider/v1/providerv1connect" + authv1connect "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/auth/v1/authv1connect" + sessionv1connect "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/session/v1/sessionv1connect" + usagev1connect "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect" + providerauth "github.com/graphprotocol/substreams-data-service/provider/auth" + "github.com/graphprotocol/substreams-data-service/provider/repository" + providersession "github.com/graphprotocol/substreams-data-service/provider/session" + providerusage "github.com/graphprotocol/substreams-data-service/provider/usage" "github.com/graphprotocol/substreams-data-service/sidecar" "github.com/streamingfast/dgrpc/server" "github.com/streamingfast/dgrpc/server/connectrpc" @@ -28,7 +35,7 @@ type Sidecar struct { logger *zap.Logger server *connectrpc.ConnectWebServer - // Session management + // Session management (legacy payment sessions) sessions *sidecar.SessionManager // Service provider identity @@ -49,8 +56,13 @@ type Sidecar struct { // Pricing configuration pricingConfig *sidecar.PricingConfig - authCacheMu sync.RWMutex - authCache map[string]authCacheEntry + authCache *haxmap.Map[string, authCacheEntry] + + // Plugin services (serve firehose-core sds:// plugins via Connect) + authService *providerauth.AuthService + usageService *providerusage.UsageService + sessionService *providersession.SessionService + repo repository.GlobalRepository } type Config struct { @@ -61,6 +73,10 @@ type Config struct { EscrowAddr eth.Address RPCEndpoint string PricingConfig *sidecar.PricingConfig + + // QuotaConfig configures per-payer worker quota limits for the session service. + // If nil, DefaultQuotaConfig() is used. + QuotaConfig *providersession.QuotaConfig } type authCacheEntry struct { @@ -84,6 +100,24 @@ func New(config *Config, logger *zap.Logger) *Sidecar { pricingConfig = sidecar.DefaultPricingConfig() } + // Build the global repository and plugin services. + repo := repository.NewInMemoryRepository() + + // The auth service needs to call IsAuthorized on the collector; reuse + // the collectorQuerier from the existing sidecar if available. + var authCollectorQuerier providerauth.CollectorAuthorizer + if collectorQuerier != nil { + authCollectorQuerier = collectorQuerier + } + + authSvc := providerauth.NewAuthService( + config.ServiceProvider, + config.Domain, + authCollectorQuerier, + ) + usageSvc := providerusage.NewUsageService(repo) + sessionSvc := providersession.NewSessionService(repo, config.QuotaConfig) + return &Sidecar{ Shutter: shutter.New(), listenAddr: config.ListenAddr, @@ -96,7 +130,11 @@ func New(config *Config, logger *zap.Logger) *Sidecar { escrowQuerier: escrowQuerier, collectorQuerier: collectorQuerier, pricingConfig: pricingConfig, - authCache: make(map[string]authCacheEntry), + authCache: haxmap.New[string, authCacheEntry](), + repo: repo, + authService: authSvc, + usageService: usageSvc, + sessionService: sessionSvc, } } @@ -113,6 +151,7 @@ func (s *Sidecar) SessionCount() int { } func (s *Sidecar) Run() { + // Connect/HTTP server for SDS services handlerGetters := []connectrpc.HandlerGetter{ func(opts ...connect.HandlerOption) (string, http.Handler) { return providerv1connect.NewProviderSidecarServiceHandler(s, opts...) 
@@ -120,6 +159,16 @@ func (s *Sidecar) Run() {
 		func(opts ...connect.HandlerOption) (string, http.Handler) {
 			return providerv1connect.NewPaymentGatewayServiceHandler(s, opts...)
 		},
+		// Plugin services for sds:// firehose-core plugins
+		func(opts ...connect.HandlerOption) (string, http.Handler) {
+			return authv1connect.NewAuthServiceHandler(s.authService, opts...)
+		},
+		func(opts ...connect.HandlerOption) (string, http.Handler) {
+			return usagev1connect.NewUsageServiceHandler(s.usageService, opts...)
+		},
+		func(opts ...connect.HandlerOption) (string, http.Handler) {
+			return sessionv1connect.NewSessionServiceHandler(s.sessionService, opts...)
+		},
 	}
 
 	s.server = connectrpc.New(
@@ -130,6 +179,9 @@
 		server.WithConnectPermissiveCORS(),
 		server.WithConnectReflection(providerv1connect.ProviderSidecarServiceName),
 		server.WithConnectReflection(providerv1connect.PaymentGatewayServiceName),
+		server.WithConnectReflection(authv1connect.AuthServiceName),
+		server.WithConnectReflection(usagev1connect.UsageServiceName),
+		server.WithConnectReflection(sessionv1connect.SessionServiceName),
 	)
 
 	s.server.OnTerminated(func(err error) {
@@ -148,11 +200,6 @@ func (s *Sidecar) healthCheck(ctx context.Context) (isReady bool, out interface{
 	return true, nil, nil
 }
 
-// verifyRAVSignature verifies a RAV signature and returns the signer address
-func (s *Sidecar) verifyRAVSignature(signedRAV *horizon.SignedRAV) (eth.Address, error) {
-	return signedRAV.RecoverSigner(s.domain)
-}
-
 func (s *Sidecar) isSignerAuthorized(ctx context.Context, payer, signer eth.Address) (bool, error) {
 	if sidecar.AddressesEqual(payer, signer) {
 		return true, nil
@@ -165,21 +212,16 @@ func (s *Sidecar) isSignerAuthorized(ctx context.Context, payer, signer eth.Addr
 	key := payer.String() + "|" + signer.String()
 	now := time.Now()
 
-	s.authCacheMu.RLock()
-	if entry, ok := s.authCache[key]; ok && now.Before(entry.expires) {
-		s.authCacheMu.RUnlock()
+	if entry, ok := s.authCache.Get(key); ok && now.Before(entry.expires) {
 		return entry.ok, nil
 	}
-	s.authCacheMu.RUnlock()
 
 	ok, err := s.collectorQuerier.IsAuthorized(ctx, payer, signer)
 	if err != nil {
 		return false, err
 	}
 
-	s.authCacheMu.Lock()
-	s.authCache[key] = authCacheEntry{ok: ok, expires: time.Now().Add(30 * time.Second)}
-	s.authCacheMu.Unlock()
+	s.authCache.Set(key, authCacheEntry{ok: ok, expires: time.Now().Add(30 * time.Second)})
 
 	return ok, nil
 }
diff --git a/provider/usage/log_test.go b/provider/usage/log_test.go
new file mode 100644
index 0000000..fa99fc2
--- /dev/null
+++ b/provider/usage/log_test.go
@@ -0,0 +1,11 @@
+package usage_test
+
+import (
+	"github.com/streamingfast/logging"
+)
+
+var zlogTest, _ = logging.PackageLogger("usage_test", "github.com/graphprotocol/substreams-data-service/provider/usage/tests")
+
+func init() {
+	logging.InstantiateLoggers()
+}
diff --git a/provider/usage/service.go b/provider/usage/service.go
new file mode 100644
index 0000000..af7a3fa
--- /dev/null
+++ b/provider/usage/service.go
@@ -0,0 +1,109 @@
+// Package usage implements the gRPC UsageService that receives batched
+// metering events from the dmetering sds:// plugin used by firehose-core.
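+//
+// Events arrive in batches over Connect; each event carries an
+// organization_id (the payer), an optional api_key_id and a list of named
+// metric counters that Report folds into the GlobalRepository.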
+package usage
+
+import (
+	"context"
+	"time"
+
+	"connectrpc.com/connect"
+	usagev1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1"
+	"github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect"
+	"github.com/graphprotocol/substreams-data-service/provider/repository"
+	"github.com/streamingfast/logging"
+	"go.uber.org/zap"
+)
+
+var zlog, _ = logging.PackageLogger("sds_usage", "github.com/graphprotocol/substreams-data-service/provider/usage")
+
+// UsageService implements usagev1connect.UsageServiceHandler.
+// It receives batched metering events from the dmetering plugin and stores
+// them in the GlobalRepository for later aggregation and reporting.
+type UsageService struct {
+	repo repository.GlobalRepository
+}
+
+var _ usagev1connect.UsageServiceHandler = (*UsageService)(nil)
+
+// NewUsageService creates a new UsageService backed by the given repository.
+func NewUsageService(repo repository.GlobalRepository) *UsageService {
+	return &UsageService{repo: repo}
+}
+
+// Report receives a batch of metering events from the dmetering plugin.
+// For each event, it stores usage data in the repository keyed by
+// api_key_id when present, falling back to organization_id (payer address).
+// Once a session has been terminated, the response is meant to carry
+// revoked=true so the plugin can stop the stream; revocation checks are
+// not wired up yet, so revoked is currently always false.
+func (s *UsageService) Report(
+	ctx context.Context,
+	req *connect.Request[usagev1.ReportRequest],
+) (*connect.Response[usagev1.ReportResponse], error) {
+	if len(req.Msg.Events) == 0 {
+		return connect.NewResponse(&usagev1.ReportResponse{}), nil
+	}
+
+	zlog.Debug("Report called", zap.Int("event_count", len(req.Msg.Events)))
+
+	for _, event := range req.Msg.Events {
+		if event == nil {
+			continue
+		}
+
+		// Derive the session key: api_key_id when present, organization_id otherwise.
+		// api_key_id may carry a session ID or signer address in future revisions.
+		sessionID := deriveSessionID(event)
+
+		usageEvent := protoEventToUsageEvent(event)
+
+		if err := s.repo.UsageAdd(ctx, sessionID, usageEvent); err != nil {
+			zlog.Warn("failed to record usage event",
+				zap.String("organization_id", event.OrganizationId),
+				zap.String("session_id", sessionID),
+				zap.Error(err),
+			)
+			// Non-fatal: continue processing remaining events.
+		}
+	}
+
+	return connect.NewResponse(&usagev1.ReportResponse{
+		Revoked: false,
+	}), nil
+}
+
+// deriveSessionID returns a stable key for aggregating usage by payer.
+// When api_key_id is provided (it may carry the session or signer ID in
+// future protocol versions), it is used as the session ID.
+// Otherwise we fall back to the organization_id (payer address).
+func deriveSessionID(event *usagev1.Event) string {
+	if event.ApiKeyId != "" {
+		return event.ApiKeyId
+	}
+	return event.OrganizationId
+}
+
+// protoEventToUsageEvent converts a proto metering Event to the internal
+// UsageEvent by summing the well-known metric counters.
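+// The name-to-counter mapping handled below is:
+//
+//	"blocks_count" / "blocks"     -> UsageEvent.Blocks
+//	"bytes_count" / "bytes"       -> UsageEvent.Bytes
+//	"requests_count" / "requests" -> UsageEvent.Requests
+//
+// Metrics with any other name are silently ignored.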
+func protoEventToUsageEvent(event *usagev1.Event) *repository.UsageEvent { + ue := &repository.UsageEvent{ + Timestamp: time.Now(), + } + if event.Timestamp != nil { + ue.Timestamp = event.Timestamp.AsTime() + } + + for _, m := range event.Metrics { + if m == nil { + continue + } + switch m.Name { + case "blocks_count", "blocks": + ue.Blocks += m.Value + case "bytes_count", "bytes": + ue.Bytes += m.Value + case "requests_count", "requests": + ue.Requests += m.Value + } + } + + return ue +} diff --git a/provider/usage/service_test.go b/provider/usage/service_test.go new file mode 100644 index 0000000..c67d2c3 --- /dev/null +++ b/provider/usage/service_test.go @@ -0,0 +1,193 @@ +package usage_test + +import ( + "context" + "testing" + "time" + + "connectrpc.com/connect" + usagev1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1" + "github.com/graphprotocol/substreams-data-service/provider/repository" + "github.com/graphprotocol/substreams-data-service/provider/usage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func newTestRepo() *repository.InMemoryRepository { + return repository.NewInMemoryRepository() +} + +func TestUsageService_Report_Empty(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + resp, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{})) + require.NoError(t, err) + assert.False(t, resp.Msg.Revoked) +} + +func TestUsageService_Report_SingleEvent(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + ts := timestamppb.New(time.Now()) + resp, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + { + OrganizationId: "0xpayer1", + ApiKeyId: "", + Endpoint: "sf.substreams.rpc.v2/Blocks", + Network: "eth-mainnet", + Timestamp: ts, + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 50}, + {Name: "bytes_count", Value: 1024}, + {Name: "requests_count", Value: 1}, + }, + }, + }, + })) + require.NoError(t, err) + assert.False(t, resp.Msg.Revoked) + + // Usage should have been stored under organization_id (since api_key_id is empty). 
+ total, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(50), total.TotalBlocks) + assert.Equal(t, int64(1024), total.TotalBytes) + assert.Equal(t, int64(1), total.TotalRequests) +} + +func TestUsageService_Report_MultipleEvents_SamePayer(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + _, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + { + OrganizationId: "0xpayer1", + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 10}, + }, + }, + { + OrganizationId: "0xpayer1", + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 20}, + }, + }, + }, + })) + require.NoError(t, err) + + total, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(30), total.TotalBlocks) +} + +func TestUsageService_Report_MultipleEvents_DifferentPayers(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + _, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + { + OrganizationId: "0xpayer1", + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 10}, + }, + }, + { + OrganizationId: "0xpayer2", + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 30}, + }, + }, + }, + })) + require.NoError(t, err) + + total1, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(10), total1.TotalBlocks) + + total2, err := repo.UsageGetTotal(context.Background(), "0xpayer2") + require.NoError(t, err) + assert.Equal(t, int64(30), total2.TotalBlocks) +} + +func TestUsageService_Report_ApiKeyIdAsSessionID(t *testing.T) { + // When api_key_id is set it is used as the session key for usage aggregation. + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + _, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + { + OrganizationId: "0xpayer1", + ApiKeyId: "session-abc123", + Metrics: []*usagev1.Metric{ + {Name: "bytes_count", Value: 512}, + }, + }, + }, + })) + require.NoError(t, err) + + // Should be stored under api_key_id, not organization_id. + total, err := repo.UsageGetTotal(context.Background(), "session-abc123") + require.NoError(t, err) + assert.Equal(t, int64(512), total.TotalBytes) + + // Nothing stored under organization_id. 
+ totalPayer, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(0), totalPayer.TotalBytes) +} + +func TestUsageService_Report_NilEventSkipped(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + resp, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + nil, + { + OrganizationId: "0xpayer1", + Metrics: []*usagev1.Metric{ + {Name: "blocks_count", Value: 5}, + }, + }, + }, + })) + require.NoError(t, err) + assert.False(t, resp.Msg.Revoked) + + total, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(5), total.TotalBlocks) +} + +func TestUsageService_Report_UnknownMetricsIgnored(t *testing.T) { + repo := newTestRepo() + svc := usage.NewUsageService(repo) + + _, err := svc.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{ + Events: []*usagev1.Event{ + { + OrganizationId: "0xpayer1", + Metrics: []*usagev1.Metric{ + {Name: "unknown_metric", Value: 999}, + {Name: "blocks_count", Value: 7}, + }, + }, + }, + })) + require.NoError(t, err) + + total, err := repo.UsageGetTotal(context.Background(), "0xpayer1") + require.NoError(t, err) + assert.Equal(t, int64(7), total.TotalBlocks) + assert.Equal(t, int64(0), total.TotalBytes) +}
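For reference, here is a minimal, self-contained sketch of how a metering plugin could exercise the UsageService above over Connect, using the same request shapes as the tests. The listen address, payer, and metric values are illustrative, and the usagev1connect client constructor is assumed to follow standard connect-go codegen (the handler registration in Run() above implies the matching client exists):

package main

import (
	"context"
	"log"
	"net/http"

	"connectrpc.com/connect"
	usagev1 "github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1"
	"github.com/graphprotocol/substreams-data-service/pb/graph/substreams/data_service/sds/usage/v1/usagev1connect"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Point the client at the sidecar's Connect/HTTP listen address
	// (":9001" is the default used by the provider sidecar command).
	client := usagev1connect.NewUsageServiceClient(http.DefaultClient, "http://localhost:9001")

	resp, err := client.Report(context.Background(), connect.NewRequest(&usagev1.ReportRequest{
		Events: []*usagev1.Event{{
			OrganizationId: "0xpayer1", // payer address; used as session key when api_key_id is empty
			Endpoint:       "sf.substreams.rpc.v2/Blocks",
			Network:        "eth-mainnet",
			Timestamp:      timestamppb.Now(),
			Metrics: []*usagev1.Metric{
				{Name: "blocks_count", Value: 100},
				{Name: "bytes_count", Value: 2048},
				{Name: "requests_count", Value: 1},
			},
		}},
	}))
	if err != nil {
		log.Fatalf("report failed: %v", err)
	}

	// Once revocation checks land server-side, revoked=true is the signal
	// to tear down the stream; today the service always returns false.
	if resp.Msg.Revoked {
		log.Println("session revoked, stopping stream")
	}
}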