From 5e95bf04120b948e5ccb3bb05dd48e1f6962baab Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Wed, 18 Feb 2026 21:33:09 +0530 Subject: [PATCH 1/7] Add Server-Timing instrumentation and optimization plan Introduce RequestTimer for per-request phase tracking (init, backend, process, total) exposed via Server-Timing response headers. Add benchmark tooling with --profile mode for collecting timing data. Document phased optimization plan covering streaming architecture, code-level fixes, and open design questions for team review. --- OPTIMIZATION.md | 468 ++++++++++++++++++++++++++ crates/common/src/lib.rs | 1 + crates/common/src/publisher.rs | 6 + crates/common/src/request_timer.rs | 151 +++++++++ crates/fastly/src/main.rs | 12 +- scripts/benchmark.sh | 516 +++++++++++++++++++++++++++++ 6 files changed, 1153 insertions(+), 1 deletion(-) create mode 100644 OPTIMIZATION.md create mode 100644 crates/common/src/request_timer.rs create mode 100755 scripts/benchmark.sh diff --git a/OPTIMIZATION.md b/OPTIMIZATION.md new file mode 100644 index 00000000..817696d7 --- /dev/null +++ b/OPTIMIZATION.md @@ -0,0 +1,468 @@ +# Trusted Server Optimization Plan + +## Status + +| Item | Status | +|------|--------| +| Production timing instrumentation (`RequestTimer`) | **Implemented** (on `feat/optimize-ts`, not yet deployed) | +| Benchmark tooling (`scripts/benchmark.sh`) | **Implemented** (includes `--profile` mode) | +| Viceroy baseline measurements | **Complete** | +| Staging external TTFB baseline | **Complete** (against staging deployment) | +| Server-Timing production data | **Blocked** — needs `feat/optimize-ts` deployed to staging | +| Streaming architecture (`stream_to_client`) | **Planned** — see Phase 2 | +| Code-level optimizations | **Planned** — see Phase 1 | + +--- + +## Key Finding: Streaming to Client IS Possible + +The Fastly Compute SDK provides `Response::stream_to_client()` which returns a `StreamingBody` handle that implements `std::io::Write`. Headers are sent immediately and body chunks stream as they're written. + +```rust +// Current: fully buffered (no bytes reach client until everything is done) +let body = response.take_body(); +let mut output = Vec::new(); +pipeline.process(body, &mut output)?; // blocks until complete +response.set_body(Body::from(output)); // only NOW does client get anything +return Ok(response); + +// Possible: streaming (headers sent immediately, body chunks as processed) +let body = response.take_body(); +let mut streaming = response.stream_to_client(); // headers sent NOW +pipeline.process(body, &mut streaming)?; // each write() → client +streaming.finish()?; +``` + +This changes the optimization strategy — **time-to-last-byte (TTLB) and peak memory CAN be significantly reduced**. TTFB itself is still gated by the Fastly platform floor (~200ms) plus backend response time, but body bytes start reaching the client as soon as the first chunk is processed instead of waiting for the entire response to be buffered. + +### Compatibility with `#[fastly::main]` — NEEDS SPIKE + +`stream_to_client()` consumes the Response and starts sending. The Fastly SDK enforces that only **one response** is sent per request via `assert_single_downstream_response_is_sent()`. The `#[fastly::main]` macro wraps your function and calls `send_to_client()` on the returned `Response` — so if `stream_to_client()` was already called, the macro **will trigger a panic**. 
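+
+A simplified illustration of the conflict (hypothetical shape only — `annotated_main` is a placeholder and this is not the literal macro expansion):
+
+```rust
+// Roughly what the attribute macro wraps around the annotated function.
+fn main() {
+    let req = fastly::Request::from_client();
+    let resp = annotated_main(req).expect("error path omitted for brevity");
+    // If annotated_main() already called stream_to_client(), a downstream
+    // response is already in flight, so this second send trips the
+    // single-response assertion and panics.
+    resp.send_to_client();
+}
+```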
+ +**This is an unresolved design problem that needs a prototype before committing to the streaming architecture.** + +Possible approaches (all need validation): + +1. **Drop the `#[fastly::main]` macro** — write a raw `main()` that handles the Fastly request lifecycle manually, giving full control over when/how the response is sent. Need to verify the Fastly SDK supports this. +2. **Use `std::process::exit(0)` after streaming** — call `stream_to_client()`, process, `finish()`, then `exit(0)` before the macro gets a chance to call `send_to_client()`. Ugly, but may work if Fastly doesn't require cleanup. +3. **Return a sentinel Response** — investigate whether the SDK treats an already-sent response as a no-op instead of panicking. (Current source code suggests it **does** panic — needs testing on actual Fastly Compute, not just source reading.) + +**Action item**: Create a minimal Fastly Compute service that calls `stream_to_client()` and test each approach. This spike should be done **before** any Phase 2 implementation work. + +Non-streaming endpoints (static JS, discovery, auction) continue returning `Response` normally. Only the publisher proxy path (the hot path) would use streaming. + +--- + +## How to Use This Document + +**For any optimization work:** + +1. Run `./scripts/benchmark.sh --save baseline` on `main` +2. Make your change on a branch +3. Rebuild: `fastly compute build` +4. Run `./scripts/benchmark.sh --save branch-name` +5. Compare: `diff benchmark-results/baseline.txt benchmark-results/branch-name.txt` +6. For production: `BENCH_URL=https://your-staging.edgecompute.app ./scripts/benchmark.sh --profile` +7. If the numbers don't improve meaningfully, don't ship it + +--- + +## Baseline Measurements + +### Viceroy (Local Simulator) + +Measured on `main` branch. Value is in **relative comparison between branches**, not absolute values. + +| Endpoint | P50 | P95 | Req/sec | Notes | +|---|---|---|---|---| +| `GET /static/tsjs=tsjs-unified.min.js` | 1.9 ms | 3.1 ms | 4,672 | Pure WASM, no backend | +| `GET /.well-known/trusted-server.json` | 1.3 ms | 1.4 ms | ~770 | Server-side only | +| `GET /` (publisher proxy) | 400 ms | 595 ms | 21 | Proxies to golf.com, 222KB HTML | +| `POST /auction` | 984 ms | 1,087 ms | 9.3 | Calls Prebid + APS backends | + +- **WASM heap**: 3.0-4.1 MB per request +- **Init overhead**: <2ms (settings parse + orchestrator + registry) +- **No cold start pattern** detected in Viceroy + +### Staging (External) + +Measured externally against staging deployment (golf.com proxy), `main` branch (no Server-Timing deployed yet). + +| Endpoint | TTFB | Total | Size | Notes | +|---|---|---|---|---| +| `GET /static/tsjs=tsjs-unified.min.js` | ~204 ms | ~219 ms | 28 KB | No backend, pure platform overhead | +| `GET /` (publisher proxy, golf.com) | ~234 ms | ~441 ms | 230 KB | Backend + processing | +| `GET /.well-known/trusted-server.json` | ~191 ms | - | - | Returns 500 (needs investigation) | + +**Key insight**: Static JS has ~204ms TTFB with zero backend work. This is the **Fastly platform floor** (WASM instantiation + edge routing + TLS). Application code cannot reduce this. The publisher proxy adds only ~30ms TTFB on top — but the full ~441ms total includes waiting for the entire response to be buffered before sending. With streaming, the ~207ms gap between TTFB (234ms) and TTLB (441ms) would shrink because body bytes stream as they're processed instead of being fully buffered. 
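+
+A quick external spot-check of that gap, using the same `curl` timing variables that `scripts/benchmark.sh` relies on (the URL is a placeholder for the staging host):
+
+```bash
+# TTFB vs. total (TTLB) for the proxied page — the difference is roughly the
+# buffered processing time that streaming would overlap with delivery.
+curl -s -o /dev/null \
+  -w 'ttfb=%{time_starttransfer}s total=%{time_total}s size=%{size_download}B\n' \
+  https://your-staging.edgecompute.app/
+```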
+ +--- + +## Implementation Plan + +### Phase 0: Deploy Server-Timing Instrumentation (DONE, needs deploy) + +**Branch**: `feat/optimize-ts` + +Already implemented: +- `RequestTimer` in `crates/common/src/request_timer.rs` — tracks `init`, `backend`, `process`, `total` phases +- `Server-Timing` header emitted on every response +- Wired into `main.rs` and `publisher.rs` +- `scripts/benchmark.sh --profile` mode to collect and report Server-Timing data + +**Action**: Deploy `feat/optimize-ts` to staging, then run: +```bash +BENCH_URL=https://.edgecompute.app ./scripts/benchmark.sh --profile +``` + +This gives us the real `init`/`backend`/`process` split for golf.com requests. + +**Conflict with Phase 2 streaming**: The `Server-Timing` header is currently set **after** response processing completes (line 147 of `main.rs`), which includes `backend` and `process` phase durations. When we switch to `stream_to_client()` in Phase 2, headers are sent **before** processing starts — so `Server-Timing` cannot include the `backend`/`process`/`total` values. + +Options for Phase 2: +- **Move Server-Timing to a trailer** — `StreamingBody` supports trailers via `StreamingBodyExt::append_trailer()`. Requires client support (browsers generally ignore trailers). +- **Log-only instrumentation** — keep `RequestTimer` for server-side logging but don't include it in response headers on streaming paths. Non-streaming endpoints still get the header. +- **Keep both paths** — Phase 0 instrumentation continues working on the buffered path. Once streaming is validated and deployed, accept that Server-Timing headers are only available for non-streaming endpoints (static, auction, discovery). + +This is **not a blocker** for Phase 0 — the instrumentation is valuable right now on the current buffered architecture. Just be aware it will need adjustment when streaming lands. + +--- + +### Phase 1: Low-Risk Code Optimizations + +These are small, safe changes that reduce CPU and memory waste. Ship as one PR, measure before/after. + +#### 1.1 Fix gzip streaming — remove full-body buffering + +**File**: `crates/common/src/streaming_processor.rs` — `process_gzip_to_gzip` + +**Problem**: Reads entire decompressed body into memory via `read_to_end`, despite deflate/brotli paths already using chunk-based `process_through_compression`. + +**Fix**: 3 lines — use `process_through_compression` like deflate/brotli: + +```rust +fn process_gzip_to_gzip(&mut self, input: R, output: W) -> Result<...> { + let decoder = GzDecoder::new(input); + let encoder = GzEncoder::new(output, Compression::default()); + self.process_through_compression(decoder, encoder) +} +``` + +| Impact | LOC | Risk | +|--------|-----|------| +| **High** (most responses are gzip; reduces peak memory) | -15/+3 | Low | + +#### 1.2 Fix `HtmlRewriterAdapter` — enable true streaming + +**File**: `crates/common/src/streaming_processor.rs` — `HtmlRewriterAdapter` + +**Problem**: Accumulates entire HTML document before processing, defeating the streaming pipeline. The comment says this is a `lol_html` limitation — **it's not**. `lol_html::HtmlRewriter` supports incremental `write()` calls and emits output via its `OutputSink` callback per-chunk. 
+
+**Fix**: Create the `HtmlRewriter` eagerly in `new()`, use `Rc<RefCell<Vec<u8>>>` via the public `lol_html::OutputSink` trait to share the output buffer:
+
+```rust
+struct RcVecSink(Rc<RefCell<Vec<u8>>>);
+
+impl lol_html::OutputSink for RcVecSink {
+    fn handle_chunk(&mut self, chunk: &[u8]) {
+        self.0.borrow_mut().extend_from_slice(chunk);
+    }
+}
+
+pub struct HtmlRewriterAdapter {
+    rewriter: Option<HtmlRewriter<'static, RcVecSink>>,
+    output: Rc<RefCell<Vec<u8>>>,
+}
+
+impl StreamProcessor for HtmlRewriterAdapter {
+    fn process_chunk(&mut self, chunk: &[u8], is_last: bool) -> Result<Vec<u8>, io::Error> {
+        if let Some(rewriter) = &mut self.rewriter {
+            if !chunk.is_empty() {
+                rewriter.write(chunk)?;
+            }
+        }
+        if is_last {
+            if let Some(rewriter) = self.rewriter.take() {
+                rewriter.end()?;
+            }
+        }
+        // Drain whatever lol_html produced
+        Ok(std::mem::take(&mut *self.output.borrow_mut()))
+    }
+}
+```
+
+| Impact | LOC | Risk |
+|--------|-----|------|
+| **High** (HTML is most common content type; eliminates 222KB+ buffer) | ~30 refactored | Medium — needs test coverage |
+
+#### 1.3 Remove verbose per-request logging
+
+**Files**: `crates/fastly/src/main.rs:37,64-67`
+
+**Problem**: `log::info!("Settings {settings:?}")` serializes the entire Settings struct (~2KB) on every request. `FASTLY_SERVICE_VERSION` env var logged at info level.
+
+**Fix**: Remove settings dump or gate behind `log::debug!`.
+
+| Impact | LOC | Risk |
+|--------|-----|------|
+| Medium-High | ~3 | None |
+
+#### 1.4 Trivial fixes batch
+
+| Fix | File | LOC |
+|-----|------|-----|
+| Const cookie prefix instead of `format!()` | `publisher.rs:207-210` | 2 |
+| `mem::take` instead of `clone` for overlap buffer | `streaming_replacer.rs:63` | 1 |
+| `eq_ignore_ascii_case` for compression detection | `streaming_processor.rs:47` | 5 |
+| `Cow` for string replacements | `streaming_replacer.rs:120-125` | 5-10 |
+| Remove base64 roundtrip in token computation | `http_util.rs:286-294` | 10-15 |
+| Replace Handlebars with manual interpolation | `synthetic.rs:82-99` | ~20 |
+| Cache `origin_host()` result per-request | `settings.rs` | 5-10 |
+
+---
+
+### Phase 2: Streaming Response Architecture
+
+This is the high-impact architectural change. Uses Fastly's `stream_to_client()` API to send response headers and body chunks to the client as they're processed, instead of buffering everything.
+
+#### 2.1 Publisher proxy: `stream_to_client()` integration
+
+**Files**: `crates/common/src/publisher.rs`, `crates/fastly/src/main.rs`
+
+**Current flow** (fully buffered):
+```
+req.send() → wait for full response → take_body()
+  → process_response_streaming() → collects into Vec
+  → Body::from(output) → return complete Response
+```
+
+**New flow** (streaming):
+```
+req.send() → take_body() → set response headers
+  → stream_to_client() → returns StreamingBody (headers sent immediately)
+  → pipeline.process(body, &mut streaming_body) → chunks written to client as processed
+  → streaming_body.finish()
+```
+
+**Key enablers**:
+- `StreamingPipeline.process()` already accepts `W: Write` — `StreamingBody` implements `Write`
+- With Phase 1 fixes (gzip streaming + HTML rewriter streaming), the pipeline is already chunk-based
+- Non-text responses can use `streaming_body.append(body)` for O(1) pass-through
+
+**Architecture change in `main.rs`**: The publisher proxy path calls `stream_to_client()` directly instead of returning a `Response`. Other endpoints (static, auction, discovery) continue returning `Response` as before.
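+
+One possible shape for that dispatch (an illustrative sketch only — the handler names are placeholders, and it assumes the `#[fastly::main]` compatibility question above is resolved, e.g. via a raw `main()`):
+
+```rust
+use fastly::{Error, Request, Response};
+
+// Hypothetical routing shape: buffered routes return a Response for the
+// caller to send; the streaming publisher path sends everything itself.
+fn route(req: Request) -> Result<Option<Response>, Error> {
+    match req.get_path() {
+        p if p.starts_with("/static/") => Ok(Some(serve_static(req)?)),
+        "/auction" => Ok(Some(handle_auction(req)?)),
+        _ => {
+            // Calls stream_to_client() internally; nothing left to send here.
+            handle_publisher_streaming(req)?;
+            Ok(None)
+        }
+    }
+}
+```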
+ +**Error handling for streaming**: Once `stream_to_client()` is called, response headers (including status 200) are already sent. If processing fails mid-stream: +- We cannot change the status code — the client already received 200 +- The `StreamingBody` will be aborted on drop (client sees incomplete response) +- We should log the error server-side for debugging +- This is the same trade-off every streaming proxy makes (nginx, Cloudflare Workers, etc.) + +To mitigate: validate backend response status and content-type **before** calling `stream_to_client()`. If the backend returns an error, fall back to the buffered path to return a proper error response. + +```rust +// Fetch from backend (blocks for full response including headers) +let mut backend_resp = req.send(&backend)?; + +// Check backend status BEFORE committing to streaming +if !backend_resp.get_status().is_success() || !should_process_content_type(&backend_resp) { + // Buffered path — can return proper error/pass-through response + return Ok(backend_resp); +} + +// Commit to streaming — headers sent to client NOW +let backend_body = backend_resp.take_body(); +let mut client_body = backend_resp.stream_to_client(); + +// Process chunks — errors logged but response is already in flight +match pipeline.process(backend_body, &mut client_body) { + Ok(()) => client_body.finish()?, + Err(e) => { + log::error!("Streaming processing failed: {:?}", e); + // StreamingBody dropped → client sees truncated response + // This is the best we can do after headers are sent + } +} +``` + +| Impact | LOC | Risk | +|--------|-----|------| +| **High** — reduces time-to-last-byte and peak memory for all proxied pages | ~80-120 | Medium — error handling requires careful design | + +#### 2.2 Concurrent origin fetch + auction (future) + +**Not applicable for golf.com** (no on-page auction), but for publishers with auction. + +The idea: use `req.send_async()` to launch the origin fetch concurrently with auction backend calls (which already use `fastly::http::request::select()` internally). When the origin response arrives, start streaming it to the client via `stream_to_client()`. When the lol_html rewriter reaches the ad injection point in the HTML, check if auction results are available. + +This would overlap origin fetch time with auction execution, so the browser starts receiving `` content (CSS, fonts) while the auction is still running. + +**Note**: This requires significant refactoring of the auction orchestrator and HTML processor to support async injection. The pseudo-code in the teammate's proposal (`origin_pending.poll()`, `run_auction_async`) represents the desired architecture but these APIs don't exist yet and would need to be built. + +| Impact | LOC | Risk | +|--------|-----|------| +| **Very High** for auction pages — browser starts loading ~400ms earlier | ~150-200 | High — complex coordination | + +--- + +### Phase 3: Measure and Validate + +After implementing Phases 1-2: + +1. Deploy to staging +2. Run `./scripts/benchmark.sh --profile` against staging +3. Compare Server-Timing data: `init`/`backend`/`process`/`total` before vs after +4. Compare external TTFB and time-to-last-byte +5. Check Fastly dashboard for memory/compute metrics +6. 
If improvement is marginal, don't ship the streaming architecture (Phase 2) + +**Success criteria**: +- Peak memory per request reduced by 30%+ (measurable via Fastly logs) +- Time-to-last-byte reduced for large HTML pages +- No regression on static endpoints or auction +- Code complexity is justified by measured improvement + +--- + +## Optimization Summary Table + +| # | Optimization | Impact | LOC | Risk | Phase | +|---|---|---|---|---|---| +| **P0** | Server-Timing instrumentation | Prerequisite | Done | None | 0 | +| **1.1** | Gzip streaming fix | **High** (memory) | -15/+3 | Low | 1 | +| **1.2** | HTML rewriter streaming | **High** (memory) | ~30 | Medium | 1 | +| **1.3** | Remove verbose logging | Medium-High | ~3 | None | 1 | +| **1.4** | Trivial fixes batch | Low-Medium | ~50 | None | 1 | +| **2.1** | `stream_to_client()` integration | **High** (TTLB) | ~80-120 | Medium | 2 | +| **2.2** | Concurrent origin + auction | **Very High** | ~150-200 | High | 2 (future) | + +--- + +## Architecture: Current vs Target + +### Current (fully buffered) + +``` +Client → Fastly Edge → [WASM starts] + → init (settings, orchestrator, registry) ~1ms + → req.send(backend) blocks for full response + → response.take_body() full body in memory + → GzDecoder.read_to_end() full decompressed in memory + → HtmlRewriterAdapter accumulates all input full HTML in memory + → lol_html processes entire document full output in memory + → GzEncoder.write_all() full recompressed in memory + → Body::from(output) Response constructed + → return Response NOW client gets first byte +``` + +**Memory**: compressed + decompressed + processed + recompressed = ~4x response size +**TTLB**: cannot send any bytes until all processing is complete + +### Target (streaming) + +``` +Client → Fastly Edge → [WASM starts] + → init (settings, orchestrator, registry) ~1ms + → req.send(backend) blocks for full response (same as current) + → response.take_body() body available as Read stream + → validate status, set response headers + → stream_to_client() headers sent to client NOW + → GzDecoder.read(8KB chunk) 8KB decompressed + → HtmlRewriter.write(chunk) output emitted via callback + → GzEncoder.write(processed) compressed chunk + → StreamingBody.write(chunk) chunk sent to client + → ... repeat for each chunk ... + → StreamingBody.finish() done +``` + +**Memory**: ~8KB chunk buffer + lol_html internal state (significantly less than 4x response size — exact savings need measurement) +**TTLB**: client receives first body bytes after first processed chunk, instead of waiting for all processing to complete. For a 222KB page, the savings is the entire processing time (decompression + rewriting + recompression). 
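+
+For reference, the per-chunk loop in the target diagram as a minimal Rust sketch (names are illustrative; the `rewrite` closure stands in for the lol_html adapter, and the decoder/encoder are whatever the pipeline wires up):
+
+```rust
+use std::io::{Read, Write};
+
+// Pull a bounded chunk from the decoder, run it through the rewrite step, and
+// push the result straight to the client-facing sink instead of accumulating it.
+fn pump(
+    mut decoded: impl Read,                    // e.g. GzDecoder over the backend body
+    mut rewrite: impl FnMut(&[u8]) -> Vec<u8>, // stand-in for the HTML rewriting step
+    mut sink: impl Write,                      // e.g. GzEncoder wrapping the StreamingBody
+) -> std::io::Result<()> {
+    let mut buf = [0u8; 8 * 1024];
+    loop {
+        let n = decoded.read(&mut buf)?;
+        if n == 0 {
+            break; // EOF — the caller finalizes the encoder and StreamingBody
+        }
+        sink.write_all(&rewrite(&buf[..n]))?;
+    }
+    sink.flush()
+}
+```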
+ +--- + +## Benchmarking Setup + +### Prerequisites + +```bash +brew install hey # HTTP load testing tool (auto-installed by benchmark.sh) +``` + +### Available Modes + +```bash +./scripts/benchmark.sh # Full benchmark suite +./scripts/benchmark.sh --quick # Quick smoke test +./scripts/benchmark.sh --ttfb # TTFB analysis only +./scripts/benchmark.sh --load-test # Load test only +./scripts/benchmark.sh --cold-start # Cold start analysis +./scripts/benchmark.sh --profile # Server-Timing phase breakdown +./scripts/benchmark.sh --save baseline # Save results to file +./scripts/benchmark.sh --compare baseline # Compare against saved results +``` + +### Profiling Against Staging + +```bash +# Requires Server-Timing branch deployed +BENCH_URL=https://example.edgecompute.app ./scripts/benchmark.sh --profile +``` + +### What the Benchmark Measures + +| Test | What it tells you | +|---|---| +| TTFB analysis | 20 sequential requests — detects cold start patterns | +| Cold start | First vs subsequent request latency | +| Endpoint latency | Per-endpoint timing breakdown (DNS, connect, TTFB, total) | +| Load test (hey) | Throughput (req/sec), latency distribution (P50/P95/P99) | +| Profile | Server-Timing phase breakdown: `init`/`backend`/`process`/`total` with min/avg/max/p95 | + +### What the Benchmark Does NOT Measure + +- Real Fastly edge performance (Viceroy is a simulator) +- WASM cold start on actual Fastly infrastructure +- Production TLS handshake overhead +- Memory usage (use Fastly dashboard or Viceroy logs) + +--- + +## Notes for Team + +### What's already on `feat/optimize-ts` branch (uncommitted) + +| File | Change | +|------|--------| +| `crates/common/src/request_timer.rs` | **New** — `RequestTimer` with `Server-Timing` header output | +| `crates/common/src/lib.rs` | Added `pub mod request_timer;` | +| `crates/fastly/src/main.rs` | Wired timer: created at top, `mark_init()`, passed to handlers, `Server-Timing` header set | +| `crates/common/src/publisher.rs` | Added `mark_backend()` after `req.send()`, `mark_process()` after body processing | +| `scripts/benchmark.sh` | Added `--profile` mode, auto-install `hey` | +| `OPTIMIZATION.md` | This document | + +### Teammate's `streaming_processor.rs` Changes + +A teammate has prepared changes to `streaming_processor.rs` that address items 1.1 and 1.2: + +- **Gzip fix**: `process_gzip_to_gzip` now uses `process_through_compression` (3-line change) +- **HTML rewriter fix**: `HtmlRewriterAdapter` rewritten to use `lol_html::OutputSink` trait with `Rc>>` for incremental streaming + +**Review notes on the HTML rewriter change**: +- `lol_html::OutputSink` is a public trait (verified in lol_html 2.7.1) +- The `Rc` pattern is necessary because `HtmlRewriter::new()` takes ownership of the sink, but we need to read output in `process_chunk()` +- `Option` with `.take()` is correct — `end()` consumes self +- The adapter is no longer reusable after `end()` — one per document, which matches actual usage +- Tests correctly updated to collect output across all chunks + +**Correctness issue — must fix in same PR**: `process_through_compression` uses `drop(encoder)` for finalization. For `GzEncoder`, `Drop` calls `finish()` internally but **silently ignores errors**. The gzip trailer contains a CRC32 checksum — if `finish()` fails, corrupted gzip responses are served to clients without any error being reported. 
This is a pre-existing issue (deflate/brotli have the same `drop()` pattern) but it **must be fixed** when gzip moves to this code path, since gzip is the most common encoding. + +Fix: change `process_through_compression` to accept an optional finalization closure, or add a separate `process_gzip_to_gzip` that calls `encoder.finish()` explicitly after `process_through_compression`-style chunk loop. + +### Decisions Needed + +1. **Deploy `feat/optimize-ts` to staging?** — Needed to get real Server-Timing data before proceeding with optimizations +2. **`#[fastly::main]` + `stream_to_client()` spike** — The macro calls `send_to_client()` on the returned Response, which will panic if streaming was already started. We need a minimal prototype to validate the approach before any Phase 2 work. See "Compatibility with `#[fastly::main]`" section above. +3. **Phase 1 vs Phase 2 priority** — Phase 1 (code fixes) is low risk and can ship independently. Phase 2 (streaming architecture) is higher impact but higher risk, and blocked on decision #2. +4. **Server-Timing on streaming paths** — `stream_to_client()` sends headers before processing completes, so `Server-Timing` can't include `backend`/`process` phases. Options: trailers, log-only, or accept the limitation. See Phase 0 section. +5. **Concurrent auction + origin (2.2)** — Not applicable for golf.com. Defer to a separate ticket? +6. **GzEncoder `finish()` correctness** — Fix the `drop(encoder)` error swallowing in `process_through_compression`, or accept the risk? diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index a01865f6..c43ed8a3 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -51,6 +51,7 @@ pub mod openrtb; pub mod proxy; pub mod publisher; pub mod request_signing; +pub mod request_timer; pub mod rsc_flight; pub mod settings; pub mod settings_data; diff --git a/crates/common/src/publisher.rs b/crates/common/src/publisher.rs index fb160f3d..c6b465d5 100644 --- a/crates/common/src/publisher.rs +++ b/crates/common/src/publisher.rs @@ -6,6 +6,7 @@ use crate::backend::BackendConfig; use crate::http_util::{serve_static_with_etag, RequestInfo}; use crate::constants::{COOKIE_SYNTHETIC_ID, HEADER_X_COMPRESS_HINT, HEADER_X_SYNTHETIC_ID}; +use crate::request_timer::RequestTimer; use crate::cookies::create_synthetic_cookie; use crate::error::TrustedServerError; use crate::integrations::IntegrationRegistry; @@ -176,6 +177,7 @@ pub fn handle_publisher_request( settings: &Settings, integration_registry: &IntegrationRegistry, mut req: Request, + timer: &mut RequestTimer, ) -> Result> { log::debug!("Proxying request to publisher_origin"); @@ -235,6 +237,8 @@ pub fn handle_publisher_request( message: "Failed to proxy request to origin".to_string(), })?; + timer.mark_backend(); + // Log all response headers for debugging log::debug!("Response headers:"); for (name, value) in response.get_headers() { @@ -310,6 +314,8 @@ pub fn handle_publisher_request( ); } + timer.mark_process(); + response.set_header(HEADER_X_SYNTHETIC_ID, synthetic_id.as_str()); if !has_synthetic_cookie { response.set_header( diff --git a/crates/common/src/request_timer.rs b/crates/common/src/request_timer.rs new file mode 100644 index 00000000..ca3d07a3 --- /dev/null +++ b/crates/common/src/request_timer.rs @@ -0,0 +1,151 @@ +//! Lightweight per-request timer for profiling the Fastly Compute request lifecycle. +//! +//! Records phase durations using [`std::time::Instant`] and emits them as a +//! 
[`Server-Timing`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Server-Timing) +//! header value so that timings are visible in browser `DevTools` (Network → Timing tab). + +use std::time::Instant; + +/// Records wall-clock durations for each phase of a request. +/// +/// Usage: +/// ```ignore +/// let mut timer = RequestTimer::new(); // captures t0 +/// // ... init work ... +/// timer.mark_init(); // captures init duration +/// // ... backend fetch ... +/// timer.mark_backend(); // captures backend duration +/// // ... body processing ... +/// timer.mark_process(); // captures process duration +/// response.set_header("Server-Timing", timer.header_value()); +/// ``` +pub struct RequestTimer { + start: Instant, + init_ms: Option, + backend_ms: Option, + process_ms: Option, + last_mark: Instant, +} + +impl RequestTimer { + /// Start a new timer. Call this as early as possible in `main()`. + #[must_use] + pub fn new() -> Self { + let now = Instant::now(); + Self { + start: now, + init_ms: None, + backend_ms: None, + process_ms: None, + last_mark: now, + } + } + + /// Mark the end of the initialization phase (settings, orchestrator, registry). + pub fn mark_init(&mut self) { + let now = Instant::now(); + self.init_ms = Some(duration_ms(self.last_mark, now)); + self.last_mark = now; + } + + /// Mark the end of the backend fetch phase. + pub fn mark_backend(&mut self) { + let now = Instant::now(); + self.backend_ms = Some(duration_ms(self.last_mark, now)); + self.last_mark = now; + } + + /// Mark the end of body processing (decompress, rewrite, recompress). + pub fn mark_process(&mut self) { + let now = Instant::now(); + self.process_ms = Some(duration_ms(self.last_mark, now)); + self.last_mark = now; + } + + /// Total elapsed time since the timer was created. + #[must_use] + pub fn total_ms(&self) -> f64 { + duration_ms(self.start, Instant::now()) + } + + /// Format as a `Server-Timing` header value. + /// + /// Example output: + /// `init;dur=1.2, backend;dur=385.4, process;dur=12.3, total;dur=401.5` + #[must_use] + pub fn header_value(&self) -> String { + let mut parts = Vec::with_capacity(4); + + if let Some(ms) = self.init_ms { + parts.push(format!("init;dur={ms:.1}")); + } + if let Some(ms) = self.backend_ms { + parts.push(format!("backend;dur={ms:.1}")); + } + if let Some(ms) = self.process_ms { + parts.push(format!("process;dur={ms:.1}")); + } + + parts.push(format!("total;dur={:.1}", self.total_ms())); + parts.join(", ") + } + + /// Format a single-line log string for Fastly logs. 
+ #[must_use] + pub fn log_line(&self) -> String { + format!( + "RequestTimer: init={:.1}ms backend={:.1}ms process={:.1}ms total={:.1}ms", + self.init_ms.unwrap_or(0.0), + self.backend_ms.unwrap_or(0.0), + self.process_ms.unwrap_or(0.0), + self.total_ms(), + ) + } +} + +impl Default for RequestTimer { + fn default() -> Self { + Self::new() + } +} + +fn duration_ms(from: Instant, to: Instant) -> f64 { + to.duration_since(from).as_secs_f64() * 1000.0 +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn header_value_includes_all_phases() { + let mut timer = RequestTimer::new(); + timer.mark_init(); + timer.mark_backend(); + timer.mark_process(); + + let header = timer.header_value(); + assert!(header.contains("init;dur="), "missing init phase"); + assert!(header.contains("backend;dur="), "missing backend phase"); + assert!(header.contains("process;dur="), "missing process phase"); + assert!(header.contains("total;dur="), "missing total phase"); + } + + #[test] + fn header_value_omits_unmarked_phases() { + let timer = RequestTimer::new(); + let header = timer.header_value(); + assert!(!header.contains("init;dur=")); + assert!(!header.contains("backend;dur=")); + assert!(header.contains("total;dur=")); + } + + #[test] + fn log_line_uses_zero_for_unmarked() { + let timer = RequestTimer::new(); + let log = timer.log_line(); + assert!(log.contains("init=0.0ms")); + assert!(log.contains("backend=0.0ms")); + assert!(log.contains("process=0.0ms")); + } +} diff --git a/crates/fastly/src/main.rs b/crates/fastly/src/main.rs index 0112bd99..a060c400 100644 --- a/crates/fastly/src/main.rs +++ b/crates/fastly/src/main.rs @@ -17,6 +17,7 @@ use trusted_server_common::request_signing::{ handle_deactivate_key, handle_rotate_key, handle_trusted_server_discovery, handle_verify_signature, }; +use trusted_server_common::request_timer::RequestTimer; use trusted_server_common::settings::Settings; use trusted_server_common::settings_data::get_settings; @@ -25,6 +26,8 @@ use crate::error::to_error_response; #[fastly::main] fn main(req: Request) -> Result { + let mut timer = RequestTimer::new(); + init_logger(); let settings = match get_settings() { @@ -47,11 +50,14 @@ fn main(req: Request) -> Result { } }; + timer.mark_init(); + futures::executor::block_on(route_request( &settings, &orchestrator, &integration_registry, req, + &mut timer, )) } @@ -60,6 +66,7 @@ async fn route_request( orchestrator: &AuctionOrchestrator, integration_registry: &IntegrationRegistry, req: Request, + timer: &mut RequestTimer, ) -> Result { log::info!( "FASTLY_SERVICE_VERSION: {}", @@ -119,7 +126,7 @@ async fn route_request( path ); - match handle_publisher_request(settings, integration_registry, req) { + match handle_publisher_request(settings, integration_registry, req, timer) { Ok(response) => Ok(response), Err(e) => { log::error!("Failed to proxy to publisher origin: {:?}", e); @@ -136,6 +143,9 @@ async fn route_request( response.set_header(key, value); } + log::info!("{}", timer.log_line()); + response.set_header("Server-Timing", timer.header_value()); + Ok(response) } diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh new file mode 100755 index 00000000..8428f36c --- /dev/null +++ b/scripts/benchmark.sh @@ -0,0 +1,516 @@ +#!/usr/bin/env bash +# +# Trusted Server Performance Benchmark +# +# Measures request latency against a running Viceroy instance. +# Run this on main, save the output, then run on your branch and compare. 
+# +# Prerequisites: +# - Viceroy running: fastly compute serve +# - hey installed: brew install hey +# +# Usage: +# ./scripts/benchmark.sh # Run all benchmarks +# ./scripts/benchmark.sh --cold-start # Cold start analysis only +# ./scripts/benchmark.sh --load-test # Load test only +# ./scripts/benchmark.sh --quick # Quick smoke test (fewer requests) +# ./scripts/benchmark.sh --profile # Server-Timing phase breakdown (init/backend/process) +# ./scripts/benchmark.sh --save baseline # Save results to file +# ./scripts/benchmark.sh --compare baseline # Compare against saved results +# +# What this measures: +# - Cold start: first request latency after server restart +# - Warm latency: subsequent request timing breakdown (DNS, connect, TTFB, transfer, total) +# - Throughput: requests/sec under concurrent load +# - Latency distribution: p50, p95, p99 under load +# +# What this does NOT measure: +# - Real Fastly edge performance (Viceroy is a simulator) +# - Network latency to real backends +# - Production TLS handshake overhead +# - WASM cold start on actual Fastly infrastructure +# +# The value is in RELATIVE comparison between branches, not absolute numbers. + +set -euo pipefail + +# --- Configuration --- +BASE_URL="${BENCH_URL:-http://127.0.0.1:7676}" +RESULTS_DIR="$(cd "$(dirname "$0")/.." && pwd)/benchmark-results" +CURL_FORMAT=' +{ + "dns_ms": %{time_namelookup}, + "connect_ms": %{time_connect}, + "tls_ms": %{time_appconnect}, + "ttfb_ms": %{time_starttransfer}, + "total_ms": %{time_total}, + "size_bytes": %{size_download}, + "http_code": %{http_code} +}' + +# Colors (disabled if not a terminal) +if [ -t 1 ]; then + BOLD='\033[1m' + GREEN='\033[0;32m' + YELLOW='\033[0;33m' + RED='\033[0;31m' + CYAN='\033[0;36m' + RESET='\033[0m' +else + BOLD='' GREEN='' YELLOW='' RED='' CYAN='' RESET='' +fi + +# --- Helpers --- + +log_header() { + echo "" + echo -e "${BOLD}${CYAN}=== $1 ===${RESET}" + echo "" +} + +log_info() { + echo -e "${GREEN}[INFO]${RESET} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${RESET} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${RESET} $1" +} + +check_server() { + if ! curl -s -o /dev/null -w "%{http_code}" "$BASE_URL/" --max-time 2 > /dev/null 2>&1; then + log_error "Server not responding at $BASE_URL" + log_error "Start it first: fastly compute serve" + exit 1 + fi + log_info "Server responding at $BASE_URL" +} + +check_hey() { + if ! command -v hey &> /dev/null; then + log_warn "'hey' not installed. Attempting to install via brew..." + if command -v brew &> /dev/null; then + brew install hey + if ! command -v hey &> /dev/null; then + log_error "Failed to install 'hey'. Load tests will be skipped." + return 1 + fi + log_info "'hey' installed successfully." + else + log_error "'brew' not found. Install hey manually: https://github.com/rakyll/hey" + log_error "Load tests will be skipped." 
+ return 1 + fi + fi + return 0 +} + +# Timed curl request — prints JSON timing breakdown +timed_curl() { + local method="$1" + local url="$2" + local label="$3" + shift 3 + local extra_args=("$@") + + local result + result=$(curl -s -o /dev/null -w "$CURL_FORMAT" \ + -X "$method" \ + ${extra_args[@]+"${extra_args[@]}"} \ + "$url" \ + --max-time 30 2>/dev/null) + + local total + total=$(echo "$result" | grep total_ms | tr -d '", ' | cut -d: -f2) + local ttfb + ttfb=$(echo "$result" | grep ttfb_ms | tr -d '", ' | cut -d: -f2) + local code + code=$(echo "$result" | grep http_code | tr -d '", ' | cut -d: -f2) + local size + size=$(echo "$result" | grep size_bytes | tr -d '", ' | cut -d: -f2) + + # Convert seconds to ms for display + local total_ms ttfb_ms + total_ms=$(echo "$total * 1000" | bc 2>/dev/null || echo "$total") + ttfb_ms=$(echo "$ttfb * 1000" | bc 2>/dev/null || echo "$ttfb") + + printf " %-40s HTTP %s TTFB: %8.2f ms Total: %8.2f ms Size: %s bytes\n" \ + "$label" "$code" "$ttfb_ms" "$total_ms" "$size" + + echo "$result" +} + +# --- Test Data --- + +AUCTION_PAYLOAD='{ + "adUnits": [ + { + "code": "header-banner", + "mediaTypes": { + "banner": { + "sizes": [[728, 90], [970, 250]] + } + } + }, + { + "code": "sidebar", + "mediaTypes": { + "banner": { + "sizes": [[300, 250], [300, 600]] + } + } + } + ] +}' + +# --- Benchmark Suites --- + +run_cold_start() { + log_header "COLD START ANALYSIS" + log_info "Measuring first-request latency (simulated via sequential requests)" + log_info "In production, cold start includes WASM instantiation which Viceroy may not reflect." + echo "" + + echo -e "${BOLD}First request (potential cold path):${RESET}" + timed_curl GET "$BASE_URL/" "GET / (first)" > /dev/null + + echo "" + echo -e "${BOLD}Subsequent requests (warm path):${RESET}" + for i in 1 2 3 4 5; do + timed_curl GET "$BASE_URL/" "GET / (warm #$i)" > /dev/null + done +} + +run_endpoint_latency() { + log_header "ENDPOINT LATENCY (WARM)" + log_info "Per-endpoint timing breakdown (5 requests each, reporting median-ish)" + echo "" + + local endpoints=( + "GET|/|Publisher proxy (fallback)" + "GET|/static/tsjs=tsjs-unified.min.js|Static JS bundle" + "GET|/.well-known/trusted-server.json|Discovery endpoint" + ) + + for entry in "${endpoints[@]}"; do + IFS='|' read -r method path label <<< "$entry" + echo -e "${BOLD}$label${RESET} ($method $path)" + + for i in $(seq 1 5); do + timed_curl "$method" "${BASE_URL}${path}" " request #$i" > /dev/null + done + echo "" + done + + # Auction endpoint (POST with body) + echo -e "${BOLD}Auction endpoint${RESET} (POST /auction)" + for i in $(seq 1 5); do + timed_curl POST "${BASE_URL}/auction" " request #$i" \ + -H "Content-Type: application/json" \ + -d "$AUCTION_PAYLOAD" > /dev/null + done + echo "" +} + +run_load_test() { + if ! 
check_hey; then + return + fi + + log_header "LOAD TEST" + log_info "Concurrent request throughput and latency distribution" + echo "" + + local total_requests="${1:-200}" + local concurrency="${2:-10}" + + echo -e "${BOLD}GET / (publisher proxy) - ${total_requests} requests, ${concurrency} concurrent${RESET}" + echo "" + hey -n "$total_requests" -c "$concurrency" -t 30 "$BASE_URL/" 2>&1 | \ + grep -E "(Requests/sec|Total:|Slowest:|Fastest:|Average:|requests done)|Status code|Latency distribution" -A 20 + echo "" + + echo -e "${BOLD}GET /static/tsjs=tsjs-unified.min.js (static) - ${total_requests} requests, ${concurrency} concurrent${RESET}" + echo "" + hey -n "$total_requests" -c "$concurrency" -t 30 "$BASE_URL/static/tsjs=tsjs-unified.min.js" 2>&1 | \ + grep -E "(Requests/sec|Total:|Slowest:|Fastest:|Average:|requests done)|Status code|Latency distribution" -A 20 + echo "" + + echo -e "${BOLD}POST /auction - ${total_requests} requests, ${concurrency} concurrent${RESET}" + echo "" + hey -n "$total_requests" -c "$concurrency" -t 30 \ + -m POST \ + -H "Content-Type: application/json" \ + -d "$AUCTION_PAYLOAD" \ + "$BASE_URL/auction" 2>&1 | \ + grep -E "(Requests/sec|Total:|Slowest:|Fastest:|Average:|requests done)|Status code|Latency distribution" -A 20 + echo "" +} + +run_first_byte_analysis() { + log_header "TIME TO FIRST BYTE (TTFB) ANALYSIS" + log_info "Measures TTFB across 20 sequential requests to detect patterns" + log_info "Look for: first request significantly slower than rest = cold start" + echo "" + + echo -e "${BOLD}Sequential TTFB for GET / :${RESET}" + echo "" + printf " %-8s %-12s %-12s\n" "Request" "TTFB (ms)" "Total (ms)" + printf " %-8s %-12s %-12s\n" "-------" "---------" "----------" + + for i in $(seq 1 20); do + local result + result=$(curl -s -o /dev/null -w "%{time_starttransfer} %{time_total}" \ + "$BASE_URL/" --max-time 30 2>/dev/null) + local ttfb total + ttfb=$(echo "$result" | awk '{printf "%.2f", $1 * 1000}') + total=$(echo "$result" | awk '{printf "%.2f", $2 * 1000}') + printf " %-8s %-12s %-12s\n" "#$i" "${ttfb}" "${total}" + done + echo "" +} + +# --- Server-Timing Profiler --- + +# Parse "init;dur=1.2, backend;dur=385.4, process;dur=12.3, total;dur=401.5" +# into associative-style variables: st_init=1.2, st_backend=385.4, etc. 
+parse_server_timing() { + local header="$1" + st_init="" st_backend="" st_process="" st_total="" + for part in $(echo "$header" | tr ',' '\n'); do + local name dur + name=$(echo "$part" | sed 's/;.*//' | tr -d ' ') + dur=$(echo "$part" | grep -o 'dur=[0-9.]*' | cut -d= -f2) + case "$name" in + init) st_init="$dur" ;; + backend) st_backend="$dur" ;; + process) st_process="$dur" ;; + total) st_total="$dur" ;; + esac + done +} + +# Collect Server-Timing data over N requests and print stats +# Also captures external TTFB and total (TTLB) for streaming comparison +profile_endpoint() { + local method="$1" + local url="$2" + local label="$3" + local iterations="${4:-20}" + shift 4 + local extra_args=("$@") + + local init_vals=() backend_vals=() process_vals=() total_vals=() + local ttfb_vals=() ttlb_vals=() + + for i in $(seq 1 "$iterations"); do + # Capture both Server-Timing header and curl timing in one request + local raw + raw=$(curl -s -D- -o /dev/null \ + -w '\n__CURL_TIMING__ %{time_starttransfer} %{time_total}' \ + -X "$method" \ + ${extra_args[@]+"${extra_args[@]}"} \ + "$url" \ + --max-time 30 2>/dev/null) + + # Extract Server-Timing header + local header + header=$(echo "$raw" | grep -i '^server-timing:' | sed 's/[Ss]erver-[Tt]iming: *//') + + # Extract curl timing (TTFB and total in seconds) + local curl_timing + curl_timing=$(echo "$raw" | grep '__CURL_TIMING__' | sed 's/__CURL_TIMING__ //') + if [ -n "$curl_timing" ]; then + local ext_ttfb ext_total + ext_ttfb=$(echo "$curl_timing" | awk '{printf "%.1f", $1 * 1000}') + ext_total=$(echo "$curl_timing" | awk '{printf "%.1f", $2 * 1000}') + ttfb_vals+=("$ext_ttfb") + ttlb_vals+=("$ext_total") + fi + + if [ -z "$header" ]; then + continue + fi + + parse_server_timing "$header" + [ -n "$st_init" ] && init_vals+=("$st_init") + [ -n "$st_backend" ] && backend_vals+=("$st_backend") + [ -n "$st_process" ] && process_vals+=("$st_process") + [ -n "$st_total" ] && total_vals+=("$st_total") + done + + echo -e " ${BOLD}$label${RESET} ($method, $iterations iterations)" + echo "" + printf " %-12s %8s %8s %8s %8s\n" "Phase" "Min" "Avg" "Max" "P95" + printf " %-12s %8s %8s %8s %8s\n" "----------" "------" "------" "------" "------" + print_stats "init" "${init_vals[@]}" + print_stats "backend" "${backend_vals[@]}" + print_stats "process" "${process_vals[@]}" + print_stats "total" "${total_vals[@]}" + echo "" + echo -e " ${BOLD}External timing (curl):${RESET}" + printf " %-12s %8s %8s %8s %8s\n" "Metric" "Min" "Avg" "Max" "P95" + printf " %-12s %8s %8s %8s %8s\n" "----------" "------" "------" "------" "------" + print_stats "TTFB" "${ttfb_vals[@]}" + print_stats "TTLB" "${ttlb_vals[@]}" + echo "" +} + +# Compute min/avg/max/p95 from a list of floats +print_stats() { + local name="$1" + shift + local vals=("$@") + local count=${#vals[@]} + + if [ "$count" -eq 0 ]; then + printf " %-12s %8s %8s %8s %8s\n" "$name" "-" "-" "-" "-" + return + fi + + # Sort values + local sorted + sorted=$(printf '%s\n' "${vals[@]}" | sort -g) + + local min avg max p95 + min=$(echo "$sorted" | head -1) + max=$(echo "$sorted" | tail -1) + + local sum + sum=$(printf '%s\n' "${vals[@]}" | awk '{s+=$1} END {printf "%.1f", s}') + avg=$(echo "$sum $count" | awk '{printf "%.1f", $1/$2}') + + local p95_idx + p95_idx=$(echo "$count" | awk '{printf "%d", int($1 * 0.95 + 0.5)}') + [ "$p95_idx" -lt 1 ] && p95_idx=1 + p95=$(echo "$sorted" | sed -n "${p95_idx}p") + + printf " %-12s %7.1f %7.1f %7.1f %7.1f\n" "$name" "$min" "$avg" "$max" "$p95" +} + +run_profile() { + local 
iterations="${1:-20}" + + log_header "SERVER-TIMING PROFILE" + log_info "Collecting Server-Timing header data over $iterations requests per endpoint" + log_info "Phases: init (setup) → backend (origin fetch) → process (body rewrite) → total" + echo "" + + profile_endpoint GET "$BASE_URL/static/tsjs=tsjs-unified.min.js" \ + "Static JS bundle" "$iterations" + + profile_endpoint GET "$BASE_URL/.well-known/trusted-server.json" \ + "Discovery endpoint" "$iterations" + + profile_endpoint GET "$BASE_URL/" \ + "Publisher proxy (fallback)" "$iterations" + + profile_endpoint POST "$BASE_URL/auction" \ + "Auction endpoint" "$iterations" \ + -H "Content-Type: application/json" \ + -d "$AUCTION_PAYLOAD" +} + +save_results() { + local name="$1" + mkdir -p "$RESULTS_DIR" + local outfile="$RESULTS_DIR/${name}.txt" + + log_info "Saving results to $outfile" + + { + echo "# Benchmark Results: $name" + echo "# Date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" + echo "# Git: $(git -C "$(dirname "$0")/.." rev-parse --short HEAD 2>/dev/null || echo 'unknown')" + echo "# Branch: $(git -C "$(dirname "$0")/.." branch --show-current 2>/dev/null || echo 'unknown')" + echo "" + run_all 2>&1 + } > "$outfile" + + log_info "Results saved. Compare later with: diff $RESULTS_DIR/baseline.txt $RESULTS_DIR/branch.txt" +} + +compare_results() { + local name="$1" + local baseline="$RESULTS_DIR/${name}.txt" + + if [ ! -f "$baseline" ]; then + log_error "No saved results found at $baseline" + log_error "Run with --save $name first" + exit 1 + fi + + local current + current=$(mktemp) + run_all 2>&1 > "$current" + + log_header "COMPARISON: current vs $name" + diff --color=auto -u "$baseline" "$current" || true + rm -f "$current" +} + +run_all() { + echo -e "${BOLD}Trusted Server Performance Benchmark${RESET}" + echo "Date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" + echo "Git: $(git -C "$(dirname "$0")/.." rev-parse --short HEAD 2>/dev/null || echo 'unknown')" + echo "Branch: $(git -C "$(dirname "$0")/.." branch --show-current 2>/dev/null || echo 'unknown')" + echo "Server: $BASE_URL" + + run_cold_start + run_first_byte_analysis + run_endpoint_latency + run_load_test 200 10 +} + +run_quick() { + echo -e "${BOLD}Trusted Server Performance Benchmark (Quick)${RESET}" + echo "Date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" + echo "Git: $(git -C "$(dirname "$0")/.." rev-parse --short HEAD 2>/dev/null || echo 'unknown')" + echo "Server: $BASE_URL" + + run_first_byte_analysis + run_load_test 50 5 +} + +# --- Main --- + +main() { + local mode="${1:-all}" + + check_server + + case "$mode" in + --cold-start) + run_cold_start + ;; + --load-test) + run_load_test "${2:-200}" "${3:-10}" + ;; + --quick) + run_quick + ;; + --ttfb) + run_first_byte_analysis + ;; + --profile) + run_profile "${2:-20}" + ;; + --save) + save_results "${2:?Usage: --save }" + ;; + --compare) + compare_results "${2:?Usage: --compare }" + ;; + --help|-h) + head -30 "$0" | grep '^#' | sed 's/^# \?//' + ;; + *) + run_all + ;; + esac +} + +main "$@" From ad993f554c141177deb80f0788f09e8f26b757b3 Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Wed, 18 Feb 2026 21:33:39 +0530 Subject: [PATCH 2/7] Add Server-Timing instrumentation and optimization plan Introduce RequestTimer for per-request phase tracking (init, backend, process, total) exposed via Server-Timing response headers. Add benchmark tooling with --profile mode for collecting timing data. Document phased optimization plan covering streaming architecture, code-level fixes, and open design questions for team review. 
--- crates/common/src/publisher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/common/src/publisher.rs b/crates/common/src/publisher.rs index c6b465d5..0aa2d648 100644 --- a/crates/common/src/publisher.rs +++ b/crates/common/src/publisher.rs @@ -6,10 +6,10 @@ use crate::backend::BackendConfig; use crate::http_util::{serve_static_with_etag, RequestInfo}; use crate::constants::{COOKIE_SYNTHETIC_ID, HEADER_X_COMPRESS_HINT, HEADER_X_SYNTHETIC_ID}; -use crate::request_timer::RequestTimer; use crate::cookies::create_synthetic_cookie; use crate::error::TrustedServerError; use crate::integrations::IntegrationRegistry; +use crate::request_timer::RequestTimer; use crate::rsc_flight::RscFlightUrlRewriter; use crate::settings::Settings; use crate::streaming_processor::{Compression, PipelineConfig, StreamProcessor, StreamingPipeline}; From 4d904690dca99ce201467a4669c4a4748e65ae9e Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Wed, 18 Feb 2026 21:59:14 +0530 Subject: [PATCH 3/7] Downgrade Settings debug dump to log::debug and tighten max_level to Info --- OPTIMIZATION.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/OPTIMIZATION.md b/OPTIMIZATION.md index 817696d7..72a5494f 100644 --- a/OPTIMIZATION.md +++ b/OPTIMIZATION.md @@ -196,13 +196,22 @@ impl StreamProcessor for HtmlRewriterAdapter { |--------|-----|------| | **High** (HTML is most common content type; eliminates 222KB+ buffer) | ~30 refactored | Medium — needs test coverage | -#### 1.3 Remove verbose per-request logging +#### 1.3 Reduce verbose per-request logging -**Files**: `crates/fastly/src/main.rs:37,64-67` +**Files**: `crates/fastly/src/main.rs:37,64-67,152-177` -**Problem**: `log::info!("Settings {settings:?}")` serializes the entire Settings struct (~2KB) on every request. `FASTLY_SERVICE_VERSION` env var logged at info level. +**Problem**: `log::info!("Settings {settings:?}")` serializes the entire Settings struct (~2KB) on every request. `FASTLY_SERVICE_VERSION` env var logged at info level. The logger is configured with `max_level(LevelFilter::Debug)`, meaning every `debug!` and above is evaluated. -**Fix**: Remove settings dump or gate behind `log::debug!`. +**Fix**: Downgrade the Settings dump to `log::debug!` and tighten the logger's `max_level` to `LevelFilter::Info` for production. The `log_fastly` crate supports `filter_module()` for per-module levels if we still want debug output from specific modules. When the level is filtered, `log` macros short-circuit before evaluating arguments — so the `Settings` `Debug` format is never even computed. + +```rust +// Before: everything at Debug and above is serialized +.max_level(log::LevelFilter::Debug) + +// After: Info in production, debug only for specific modules if needed +.max_level(log::LevelFilter::Info) +// Optional: .filter_module("trusted_server", log::LevelFilter::Debug) +``` | Impact | LOC | Risk | |--------|-----|------| From 01d77e4b95caab5c868b6f7cf82d10348241e9e5 Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Thu, 19 Feb 2026 17:17:17 +0530 Subject: [PATCH 4/7] Remove Server-Timing instrumentation and clean up benchmark tooling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RequestTimer and Server-Timing header were premature — WASM guest profiling via profile.sh gives better per-function visibility without runtime overhead. Also strips dead --profile mode from benchmark.sh. 
--- .gitignore | 3 + OPTIMIZATION.md | 175 ++++++++++++------- crates/common/src/lib.rs | 2 +- crates/common/src/publisher.rs | 7 +- crates/common/src/request_timer.rs | 151 ---------------- crates/fastly/src/main.rs | 12 +- scripts/benchmark.sh | 146 +--------------- scripts/profile.sh | 269 +++++++++++++++++++++++++++++ 8 files changed, 386 insertions(+), 379 deletions(-) delete mode 100644 crates/common/src/request_timer.rs create mode 100755 scripts/profile.sh diff --git a/.gitignore b/.gitignore index af8ff99b..1bd7e773 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,6 @@ src/*.html # SSL certificates *.pem + +/guest-profiles +/benchmark-results/** \ No newline at end of file diff --git a/OPTIMIZATION.md b/OPTIMIZATION.md index 72a5494f..77e7df45 100644 --- a/OPTIMIZATION.md +++ b/OPTIMIZATION.md @@ -4,11 +4,10 @@ | Item | Status | |------|--------| -| Production timing instrumentation (`RequestTimer`) | **Implemented** (on `feat/optimize-ts`, not yet deployed) | -| Benchmark tooling (`scripts/benchmark.sh`) | **Implemented** (includes `--profile` mode) | +| Benchmark tooling (`scripts/benchmark.sh`) | **Implemented** | +| WASM guest profiling (`scripts/profile.sh`) | **Implemented** (flame graphs via `--profile-guest`) | | Viceroy baseline measurements | **Complete** | | Staging external TTFB baseline | **Complete** (against staging deployment) | -| Server-Timing production data | **Blocked** — needs `feat/optimize-ts` deployed to staging | | Streaming architecture (`stream_to_client`) | **Planned** — see Phase 2 | | Code-level optimizations | **Planned** — see Phase 1 | @@ -35,19 +34,33 @@ streaming.finish()?; This changes the optimization strategy — **time-to-last-byte (TTLB) and peak memory CAN be significantly reduced**. TTFB itself is still gated by the Fastly platform floor (~200ms) plus backend response time, but body bytes start reaching the client as soon as the first chunk is processed instead of waiting for the entire response to be buffered. -### Compatibility with `#[fastly::main]` — NEEDS SPIKE +### Compatibility with `#[fastly::main]` — use undecorated `main()` (recommended) -`stream_to_client()` consumes the Response and starts sending. The Fastly SDK enforces that only **one response** is sent per request via `assert_single_downstream_response_is_sent()`. The `#[fastly::main]` macro wraps your function and calls `send_to_client()` on the returned `Response` — so if `stream_to_client()` was already called, the macro **will trigger a panic**. +For streaming final responses, the Fastly SDK docs already define the intended pattern: -**This is an unresolved design problem that needs a prototype before committing to the streaming architecture.** +- `Request::from_client()` docs explicitly state it is incompatible with `#[fastly::main]` and recommend an undecorated `main()` with explicit response sending. +- `Response::send_to_client()` / `Response::stream_to_client()` include the same compatibility guidance. +- `fastly::init()` is public (doc-hidden) and can be called from raw `main()` to initialize the ABI. -Possible approaches (all need validation): +This means approach #1 is the correct architecture for streaming paths, and approaches like `std::process::exit(0)` or sentinel responses are unnecessary. -1. **Drop the `#[fastly::main]` macro** — write a raw `main()` that handles the Fastly request lifecycle manually, giving full control over when/how the response is sent. Need to verify the Fastly SDK supports this. -2. 
**Use `std::process::exit(0)` after streaming** — call `stream_to_client()`, process, `finish()`, then `exit(0)` before the macro gets a chance to call `send_to_client()`. Ugly, but may work if Fastly doesn't require cleanup. -3. **Return a sentinel Response** — investigate whether the SDK treats an already-sent response as a no-op instead of panicking. (Current source code suggests it **does** panic — needs testing on actual Fastly Compute, not just source reading.) +Recommended shape: -**Action item**: Create a minimal Fastly Compute service that calls `stream_to_client()` and test each approach. This spike should be done **before** any Phase 2 implementation work. +```rust +fn main() -> Result<(), fastly::Error> { + fastly::init(); + let req = fastly::Request::from_client(); + + match route_request(req)? { + Some(resp) => resp.send_to_client(), // non-streaming path + None => {} // streaming path already sent + finished + } + + Ok(()) +} +``` + +**Action item**: Do a focused spike on real Fastly Compute to validate runtime behavior (no double-send panics across mixed routes, proper error behavior for partially streamed responses, and observability expectations). The API viability question is resolved. Non-streaming endpoints (static JS, discovery, auction) continue returning `Response` normally. Only the publisher proxy path (the hot path) would use streaming. @@ -86,45 +99,31 @@ Measured on `main` branch. Value is in **relative comparison between branches**, ### Staging (External) -Measured externally against staging deployment (golf.com proxy), `main` branch (no Server-Timing deployed yet). +Measured externally against staging deployment (golf.com proxy), `main` branch. | Endpoint | TTFB | Total | Size | Notes | |---|---|---|---|---| -| `GET /static/tsjs=tsjs-unified.min.js` | ~204 ms | ~219 ms | 28 KB | No backend, pure platform overhead | +| `GET /static/tsjs=tsjs-unified.min.js` | ~204 ms | ~219 ms | 28 KB | No backend; includes client-network + edge path from benchmark vantage | | `GET /` (publisher proxy, golf.com) | ~234 ms | ~441 ms | 230 KB | Backend + processing | | `GET /.well-known/trusted-server.json` | ~191 ms | - | - | Returns 500 (needs investigation) | -**Key insight**: Static JS has ~204ms TTFB with zero backend work. This is the **Fastly platform floor** (WASM instantiation + edge routing + TLS). Application code cannot reduce this. The publisher proxy adds only ~30ms TTFB on top — but the full ~441ms total includes waiting for the entire response to be buffered before sending. With streaming, the ~207ms gap between TTFB (234ms) and TTLB (441ms) would shrink because body bytes stream as they're processed instead of being fully buffered. +**Key insight**: Static JS has ~204ms TTFB with zero backend work **from this specific benchmark vantage point**. That number includes client-to-edge RTT, DNS, TLS/connection state, and edge processing; it is **not** a universal Fastly floor. `WASM` instantiation can contribute on cold paths, but warm requests from clients near a POP can be much lower. + +For this dataset, treat static TTFB as an environment baseline and compare deltas: the publisher proxy adds only ~30ms TTFB on top. The larger optimization target is the TTFB→TTLB gap (~207ms here), which streaming can shrink by sending body chunks as they are processed instead of waiting for full buffering. 
--- ## Implementation Plan -### Phase 0: Deploy Server-Timing Instrumentation (DONE, needs deploy) +### Phase 0: Tooling and Baselines (DONE) **Branch**: `feat/optimize-ts` -Already implemented: -- `RequestTimer` in `crates/common/src/request_timer.rs` — tracks `init`, `backend`, `process`, `total` phases -- `Server-Timing` header emitted on every response -- Wired into `main.rs` and `publisher.rs` -- `scripts/benchmark.sh --profile` mode to collect and report Server-Timing data - -**Action**: Deploy `feat/optimize-ts` to staging, then run: -```bash -BENCH_URL=https://.edgecompute.app ./scripts/benchmark.sh --profile -``` - -This gives us the real `init`/`backend`/`process` split for golf.com requests. - -**Conflict with Phase 2 streaming**: The `Server-Timing` header is currently set **after** response processing completes (line 147 of `main.rs`), which includes `backend` and `process` phase durations. When we switch to `stream_to_client()` in Phase 2, headers are sent **before** processing starts — so `Server-Timing` cannot include the `backend`/`process`/`total` values. - -Options for Phase 2: -- **Move Server-Timing to a trailer** — `StreamingBody` supports trailers via `StreamingBodyExt::append_trailer()`. Requires client support (browsers generally ignore trailers). -- **Log-only instrumentation** — keep `RequestTimer` for server-side logging but don't include it in response headers on streaming paths. Non-streaming endpoints still get the header. -- **Keep both paths** — Phase 0 instrumentation continues working on the buffered path. Once streaming is validated and deployed, accept that Server-Timing headers are only available for non-streaming endpoints (static, auction, discovery). - -This is **not a blocker** for Phase 0 — the instrumentation is valuable right now on the current buffered architecture. Just be aware it will need adjustment when streaming lands. +Completed: +- `scripts/benchmark.sh` — HTTP load testing with TTFB analysis, cold start detection, endpoint latency breakdown +- `scripts/profile.sh` — WASM guest profiling via `fastly compute serve --profile-guest`, outputs Firefox Profiler-compatible flame graphs +- Viceroy baseline measurements (see tables above) +- Staging external TTFB baseline --- @@ -217,7 +216,46 @@ impl StreamProcessor for HtmlRewriterAdapter { |--------|-----|------| | Medium-High | ~3 | None | -#### 1.4 Trivial fixes batch +#### 1.4 Eliminate redundant `config` crate parsing in `get_settings()` — **22% CPU** + +**Files**: `crates/common/src/settings_data.rs`, `crates/common/src/settings.rs` + +**Problem**: Flame graph profiling shows `get_settings()` consuming ~22% of per-request CPU. The `build.rs` already merges `trusted-server.toml` + all `TRUSTED_SERVER__*` env vars at compile time and writes a fully-resolved TOML file to `target/trusted-server-out.toml`. But at runtime, `get_settings()` calls `Settings::from_toml()`, which re-runs the entire `config` crate pipeline — `Config::builder().add_source(File).add_source(Environment).build().try_deserialize()` — redundantly scanning env vars and merging sources that were already resolved at build time. + +**Root cause**: `settings_data.rs` embeds the build-time-resolved TOML via `include_bytes!`, then hands it to `from_toml()` which treats it as a raw config source and re-layers env vars on top. + +**Fix**: Replace `Settings::from_toml()` with direct `toml::from_str()` in `get_settings()`. The embedded TOML is already fully resolved — no `config` crate needed at runtime. 
+ +```rust +// Before (22% CPU — re-runs config crate pipeline + env var scan) +let settings = Settings::from_toml(toml_str)?; + +// After (near-instant — just TOML deserialization) +let settings: Settings = toml::from_str(toml_str) + .change_context(TrustedServerError::Configuration { + message: "Failed to deserialize embedded config".to_string(), + })?; +``` + +**Alternative — binary serialization for near-zero cost**: Since `build.rs` already has a fully constructed `Settings` struct, it could serialize to `postcard` (a `no_std`-compatible, WASM-safe binary format). Runtime deserialization becomes a memcpy-like operation instead of TOML parsing. Requires adding `postcard` + updating `build.rs` to write binary and `settings_data.rs` to deserialize binary. + +```rust +// build.rs: serialize to binary instead of TOML +let bytes = postcard::to_allocvec(&settings).expect("Failed to serialize"); +fs::write(dest_path, bytes)?; + +// settings_data.rs: near-instant deserialization +let settings: Settings = postcard::from_bytes(SETTINGS_DATA) + .change_context(TrustedServerError::Configuration { ... })?; +``` + +**Recommendation**: Start with the `toml::from_str()` fix (1-line change, no new deps). If profiling still shows meaningful time in TOML parsing, upgrade to `postcard`. + +| Impact | LOC | Risk | +|--------|-----|------| +| **Very High** (~22% CPU eliminated) | 1-3 | Low — `build.rs` already resolves everything | + +#### 1.5 Trivial fixes batch | Fix | File | LOC | |-----|------|-----| @@ -319,9 +357,9 @@ This would overlap origin fetch time with auction execution, so the browser star After implementing Phases 1-2: 1. Deploy to staging -2. Run `./scripts/benchmark.sh --profile` against staging -3. Compare Server-Timing data: `init`/`backend`/`process`/`total` before vs after -4. Compare external TTFB and time-to-last-byte +2. Run `./scripts/benchmark.sh` against staging for external TTFB/TTLB +3. Run `./scripts/profile.sh` locally for flame graph comparison +4. Compare external TTFB and time-to-last-byte before vs after 5. Check Fastly dashboard for memory/compute metrics 6. 
If improvement is marginal, don't ship the streaming architecture (Phase 2) @@ -337,11 +375,12 @@ After implementing Phases 1-2: | # | Optimization | Impact | LOC | Risk | Phase | |---|---|---|---|---|---| -| **P0** | Server-Timing instrumentation | Prerequisite | Done | None | 0 | +| **P0** | Tooling and baselines | Prerequisite | Done | None | 0 | | **1.1** | Gzip streaming fix | **High** (memory) | -15/+3 | Low | 1 | | **1.2** | HTML rewriter streaming | **High** (memory) | ~30 | Medium | 1 | | **1.3** | Remove verbose logging | Medium-High | ~3 | None | 1 | -| **1.4** | Trivial fixes batch | Low-Medium | ~50 | None | 1 | +| **1.4** | Eliminate redundant `config` crate in `get_settings()` | **Very High** (~22% CPU) | 1-3 | Low | 1 | +| **1.5** | Trivial fixes batch | Low-Medium | ~50 | None | 1 | | **2.1** | `stream_to_client()` integration | **High** (TTLB) | ~80-120 | Medium | 2 | | **2.2** | Concurrent origin + auction | **Very High** | ~150-200 | High | 2 (future) | @@ -405,29 +444,40 @@ brew install hey # HTTP load testing tool (auto-installed by benchmark.sh) ./scripts/benchmark.sh --ttfb # TTFB analysis only ./scripts/benchmark.sh --load-test # Load test only ./scripts/benchmark.sh --cold-start # Cold start analysis -./scripts/benchmark.sh --profile # Server-Timing phase breakdown ./scripts/benchmark.sh --save baseline # Save results to file ./scripts/benchmark.sh --compare baseline # Compare against saved results ``` -### Profiling Against Staging +### WASM Guest Profiling (Flame Graphs) + +`fastly compute serve --profile-guest` samples the WASM call stack every 50us and writes a Firefox Profiler-compatible JSON on exit. This shows exactly which Rust functions consume CPU time — compression, HTML rewriting, string operations, init, etc. ```bash -# Requires Server-Timing branch deployed -BENCH_URL=https://example.edgecompute.app ./scripts/benchmark.sh --profile +./scripts/profile.sh # Profile GET / (publisher proxy) +./scripts/profile.sh --endpoint /auction \ + --method POST --body '{"adUnits":[]}' # Profile specific endpoint +./scripts/profile.sh --requests 50 # More samples for stable flame graph +./scripts/profile.sh --no-build # Skip rebuild +./scripts/profile.sh --open # Auto-open Firefox Profiler (macOS) + +# View: drag output file onto https://profiler.firefox.com/ ``` -### What the Benchmark Measures +The script builds, starts the profiling server, fires requests, stops the server, and saves the profile to `benchmark-results/profiles/`. 
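+
+Under the hood the workflow is build → profiling server → request burst → shutdown. A simplified sketch of what the script automates (port and output paths match the defaults in `scripts/profile.sh`; the real script adds readiness polling, cleanup traps, and profile file discovery):
+
+```bash
+# Simplified sketch — not a replacement for scripts/profile.sh.
+fastly compute build
+fastly compute serve --profile-guest &     # Viceroy samples the guest stack while serving
+SERVER_PID=$!
+sleep 5                                    # crude wait; the script polls until the server responds
+for i in $(seq 1 20); do curl -s -o /dev/null "http://127.0.0.1:7676/"; done
+kill "$SERVER_PID"                         # profile JSON is flushed to guest-profiles/ on exit
+```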
-| Test | What it tells you | +### What the Tools Measure + +| Tool | What it tells you | |---|---| -| TTFB analysis | 20 sequential requests — detects cold start patterns | -| Cold start | First vs subsequent request latency | -| Endpoint latency | Per-endpoint timing breakdown (DNS, connect, TTFB, total) | -| Load test (hey) | Throughput (req/sec), latency distribution (P50/P95/P99) | -| Profile | Server-Timing phase breakdown: `init`/`backend`/`process`/`total` with min/avg/max/p95 | +| `benchmark.sh` — TTFB analysis | 20 sequential requests — detects cold start patterns | +| `benchmark.sh` — Cold start | First vs subsequent request latency | +| `benchmark.sh` — Endpoint latency | Per-endpoint timing breakdown (DNS, connect, TTFB, total) | +| `benchmark.sh` — Load test (hey) | Throughput (req/sec), latency distribution (P50/P95/P99) | +| `profile.sh` | Per-function CPU time inside WASM — flame graph via `--profile-guest` | + +**Use `profile.sh` first** to identify which functions are bottlenecks, then use `benchmark.sh` to measure the impact of fixes on external timing. -### What the Benchmark Does NOT Measure +### What These Tools Do NOT Measure - Real Fastly edge performance (Viceroy is a simulator) - WASM cold start on actual Fastly infrastructure @@ -442,11 +492,8 @@ BENCH_URL=https://example.edgecompute.app ./scripts/benchmark.sh --profile | File | Change | |------|--------| -| `crates/common/src/request_timer.rs` | **New** — `RequestTimer` with `Server-Timing` header output | -| `crates/common/src/lib.rs` | Added `pub mod request_timer;` | -| `crates/fastly/src/main.rs` | Wired timer: created at top, `mark_init()`, passed to handlers, `Server-Timing` header set | -| `crates/common/src/publisher.rs` | Added `mark_backend()` after `req.send()`, `mark_process()` after body processing | -| `scripts/benchmark.sh` | Added `--profile` mode, auto-install `hey` | +| `scripts/benchmark.sh` | HTTP load testing, TTFB analysis, cold start detection, auto-install `hey` | +| `scripts/profile.sh` | WASM guest profiling via `--profile-guest`, flame graph workflow | | `OPTIMIZATION.md` | This document | ### Teammate's `streaming_processor.rs` Changes @@ -469,9 +516,7 @@ Fix: change `process_through_compression` to accept an optional finalization clo ### Decisions Needed -1. **Deploy `feat/optimize-ts` to staging?** — Needed to get real Server-Timing data before proceeding with optimizations -2. **`#[fastly::main]` + `stream_to_client()` spike** — The macro calls `send_to_client()` on the returned Response, which will panic if streaming was already started. We need a minimal prototype to validate the approach before any Phase 2 work. See "Compatibility with `#[fastly::main]`" section above. -3. **Phase 1 vs Phase 2 priority** — Phase 1 (code fixes) is low risk and can ship independently. Phase 2 (streaming architecture) is higher impact but higher risk, and blocked on decision #2. -4. **Server-Timing on streaming paths** — `stream_to_client()` sends headers before processing completes, so `Server-Timing` can't include `backend`/`process` phases. Options: trailers, log-only, or accept the limitation. See Phase 0 section. -5. **Concurrent auction + origin (2.2)** — Not applicable for golf.com. Defer to a separate ticket? -6. **GzEncoder `finish()` correctness** — Fix the `drop(encoder)` error swallowing in `process_through_compression`, or accept the risk? +1. 
**Raw `main()` migration spike** — Validate end-to-end behavior on Fastly Compute when using undecorated `main()` + `Request::from_client()` and mixing buffered + streaming routes in one service. +2. **Phase 1 vs Phase 2 priority** — Phase 1 (code fixes) is low risk and can ship independently. Phase 2 (streaming architecture) is higher impact and should proceed after decision #1 confirms runtime behavior. +3. **Concurrent auction + origin (2.2)** — Not applicable for golf.com. Defer to a separate ticket? +4. **GzEncoder `finish()` correctness** — Fix the `drop(encoder)` error swallowing in `process_through_compression`, or accept the risk? diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index c43ed8a3..fd6fe5e1 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -51,7 +51,7 @@ pub mod openrtb; pub mod proxy; pub mod publisher; pub mod request_signing; -pub mod request_timer; + pub mod rsc_flight; pub mod settings; pub mod settings_data; diff --git a/crates/common/src/publisher.rs b/crates/common/src/publisher.rs index 0aa2d648..0794f066 100644 --- a/crates/common/src/publisher.rs +++ b/crates/common/src/publisher.rs @@ -9,7 +9,7 @@ use crate::constants::{COOKIE_SYNTHETIC_ID, HEADER_X_COMPRESS_HINT, HEADER_X_SYN use crate::cookies::create_synthetic_cookie; use crate::error::TrustedServerError; use crate::integrations::IntegrationRegistry; -use crate::request_timer::RequestTimer; + use crate::rsc_flight::RscFlightUrlRewriter; use crate::settings::Settings; use crate::streaming_processor::{Compression, PipelineConfig, StreamProcessor, StreamingPipeline}; @@ -177,7 +177,6 @@ pub fn handle_publisher_request( settings: &Settings, integration_registry: &IntegrationRegistry, mut req: Request, - timer: &mut RequestTimer, ) -> Result> { log::debug!("Proxying request to publisher_origin"); @@ -237,8 +236,6 @@ pub fn handle_publisher_request( message: "Failed to proxy request to origin".to_string(), })?; - timer.mark_backend(); - // Log all response headers for debugging log::debug!("Response headers:"); for (name, value) in response.get_headers() { @@ -314,8 +311,6 @@ pub fn handle_publisher_request( ); } - timer.mark_process(); - response.set_header(HEADER_X_SYNTHETIC_ID, synthetic_id.as_str()); if !has_synthetic_cookie { response.set_header( diff --git a/crates/common/src/request_timer.rs b/crates/common/src/request_timer.rs deleted file mode 100644 index ca3d07a3..00000000 --- a/crates/common/src/request_timer.rs +++ /dev/null @@ -1,151 +0,0 @@ -//! Lightweight per-request timer for profiling the Fastly Compute request lifecycle. -//! -//! Records phase durations using [`std::time::Instant`] and emits them as a -//! [`Server-Timing`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Server-Timing) -//! header value so that timings are visible in browser `DevTools` (Network → Timing tab). - -use std::time::Instant; - -/// Records wall-clock durations for each phase of a request. -/// -/// Usage: -/// ```ignore -/// let mut timer = RequestTimer::new(); // captures t0 -/// // ... init work ... -/// timer.mark_init(); // captures init duration -/// // ... backend fetch ... -/// timer.mark_backend(); // captures backend duration -/// // ... body processing ... 
-/// timer.mark_process(); // captures process duration -/// response.set_header("Server-Timing", timer.header_value()); -/// ``` -pub struct RequestTimer { - start: Instant, - init_ms: Option, - backend_ms: Option, - process_ms: Option, - last_mark: Instant, -} - -impl RequestTimer { - /// Start a new timer. Call this as early as possible in `main()`. - #[must_use] - pub fn new() -> Self { - let now = Instant::now(); - Self { - start: now, - init_ms: None, - backend_ms: None, - process_ms: None, - last_mark: now, - } - } - - /// Mark the end of the initialization phase (settings, orchestrator, registry). - pub fn mark_init(&mut self) { - let now = Instant::now(); - self.init_ms = Some(duration_ms(self.last_mark, now)); - self.last_mark = now; - } - - /// Mark the end of the backend fetch phase. - pub fn mark_backend(&mut self) { - let now = Instant::now(); - self.backend_ms = Some(duration_ms(self.last_mark, now)); - self.last_mark = now; - } - - /// Mark the end of body processing (decompress, rewrite, recompress). - pub fn mark_process(&mut self) { - let now = Instant::now(); - self.process_ms = Some(duration_ms(self.last_mark, now)); - self.last_mark = now; - } - - /// Total elapsed time since the timer was created. - #[must_use] - pub fn total_ms(&self) -> f64 { - duration_ms(self.start, Instant::now()) - } - - /// Format as a `Server-Timing` header value. - /// - /// Example output: - /// `init;dur=1.2, backend;dur=385.4, process;dur=12.3, total;dur=401.5` - #[must_use] - pub fn header_value(&self) -> String { - let mut parts = Vec::with_capacity(4); - - if let Some(ms) = self.init_ms { - parts.push(format!("init;dur={ms:.1}")); - } - if let Some(ms) = self.backend_ms { - parts.push(format!("backend;dur={ms:.1}")); - } - if let Some(ms) = self.process_ms { - parts.push(format!("process;dur={ms:.1}")); - } - - parts.push(format!("total;dur={:.1}", self.total_ms())); - parts.join(", ") - } - - /// Format a single-line log string for Fastly logs. 
- #[must_use] - pub fn log_line(&self) -> String { - format!( - "RequestTimer: init={:.1}ms backend={:.1}ms process={:.1}ms total={:.1}ms", - self.init_ms.unwrap_or(0.0), - self.backend_ms.unwrap_or(0.0), - self.process_ms.unwrap_or(0.0), - self.total_ms(), - ) - } -} - -impl Default for RequestTimer { - fn default() -> Self { - Self::new() - } -} - -fn duration_ms(from: Instant, to: Instant) -> f64 { - to.duration_since(from).as_secs_f64() * 1000.0 -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn header_value_includes_all_phases() { - let mut timer = RequestTimer::new(); - timer.mark_init(); - timer.mark_backend(); - timer.mark_process(); - - let header = timer.header_value(); - assert!(header.contains("init;dur="), "missing init phase"); - assert!(header.contains("backend;dur="), "missing backend phase"); - assert!(header.contains("process;dur="), "missing process phase"); - assert!(header.contains("total;dur="), "missing total phase"); - } - - #[test] - fn header_value_omits_unmarked_phases() { - let timer = RequestTimer::new(); - let header = timer.header_value(); - assert!(!header.contains("init;dur=")); - assert!(!header.contains("backend;dur=")); - assert!(header.contains("total;dur=")); - } - - #[test] - fn log_line_uses_zero_for_unmarked() { - let timer = RequestTimer::new(); - let log = timer.log_line(); - assert!(log.contains("init=0.0ms")); - assert!(log.contains("backend=0.0ms")); - assert!(log.contains("process=0.0ms")); - } -} diff --git a/crates/fastly/src/main.rs b/crates/fastly/src/main.rs index a060c400..0112bd99 100644 --- a/crates/fastly/src/main.rs +++ b/crates/fastly/src/main.rs @@ -17,7 +17,6 @@ use trusted_server_common::request_signing::{ handle_deactivate_key, handle_rotate_key, handle_trusted_server_discovery, handle_verify_signature, }; -use trusted_server_common::request_timer::RequestTimer; use trusted_server_common::settings::Settings; use trusted_server_common::settings_data::get_settings; @@ -26,8 +25,6 @@ use crate::error::to_error_response; #[fastly::main] fn main(req: Request) -> Result { - let mut timer = RequestTimer::new(); - init_logger(); let settings = match get_settings() { @@ -50,14 +47,11 @@ fn main(req: Request) -> Result { } }; - timer.mark_init(); - futures::executor::block_on(route_request( &settings, &orchestrator, &integration_registry, req, - &mut timer, )) } @@ -66,7 +60,6 @@ async fn route_request( orchestrator: &AuctionOrchestrator, integration_registry: &IntegrationRegistry, req: Request, - timer: &mut RequestTimer, ) -> Result { log::info!( "FASTLY_SERVICE_VERSION: {}", @@ -126,7 +119,7 @@ async fn route_request( path ); - match handle_publisher_request(settings, integration_registry, req, timer) { + match handle_publisher_request(settings, integration_registry, req) { Ok(response) => Ok(response), Err(e) => { log::error!("Failed to proxy to publisher origin: {:?}", e); @@ -143,9 +136,6 @@ async fn route_request( response.set_header(key, value); } - log::info!("{}", timer.log_line()); - response.set_header("Server-Timing", timer.header_value()); - Ok(response) } diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh index 8428f36c..daebccb3 100755 --- a/scripts/benchmark.sh +++ b/scripts/benchmark.sh @@ -14,7 +14,7 @@ # ./scripts/benchmark.sh --cold-start # Cold start analysis only # ./scripts/benchmark.sh --load-test # Load test only # ./scripts/benchmark.sh --quick # Quick smoke test (fewer requests) -# ./scripts/benchmark.sh --profile # Server-Timing phase breakdown (init/backend/process) +# 
./scripts/benchmark.sh --ttfb # TTFB analysis only # ./scripts/benchmark.sh --save baseline # Save results to file # ./scripts/benchmark.sh --compare baseline # Compare against saved results # @@ -273,147 +273,6 @@ run_first_byte_analysis() { echo "" } -# --- Server-Timing Profiler --- - -# Parse "init;dur=1.2, backend;dur=385.4, process;dur=12.3, total;dur=401.5" -# into associative-style variables: st_init=1.2, st_backend=385.4, etc. -parse_server_timing() { - local header="$1" - st_init="" st_backend="" st_process="" st_total="" - for part in $(echo "$header" | tr ',' '\n'); do - local name dur - name=$(echo "$part" | sed 's/;.*//' | tr -d ' ') - dur=$(echo "$part" | grep -o 'dur=[0-9.]*' | cut -d= -f2) - case "$name" in - init) st_init="$dur" ;; - backend) st_backend="$dur" ;; - process) st_process="$dur" ;; - total) st_total="$dur" ;; - esac - done -} - -# Collect Server-Timing data over N requests and print stats -# Also captures external TTFB and total (TTLB) for streaming comparison -profile_endpoint() { - local method="$1" - local url="$2" - local label="$3" - local iterations="${4:-20}" - shift 4 - local extra_args=("$@") - - local init_vals=() backend_vals=() process_vals=() total_vals=() - local ttfb_vals=() ttlb_vals=() - - for i in $(seq 1 "$iterations"); do - # Capture both Server-Timing header and curl timing in one request - local raw - raw=$(curl -s -D- -o /dev/null \ - -w '\n__CURL_TIMING__ %{time_starttransfer} %{time_total}' \ - -X "$method" \ - ${extra_args[@]+"${extra_args[@]}"} \ - "$url" \ - --max-time 30 2>/dev/null) - - # Extract Server-Timing header - local header - header=$(echo "$raw" | grep -i '^server-timing:' | sed 's/[Ss]erver-[Tt]iming: *//') - - # Extract curl timing (TTFB and total in seconds) - local curl_timing - curl_timing=$(echo "$raw" | grep '__CURL_TIMING__' | sed 's/__CURL_TIMING__ //') - if [ -n "$curl_timing" ]; then - local ext_ttfb ext_total - ext_ttfb=$(echo "$curl_timing" | awk '{printf "%.1f", $1 * 1000}') - ext_total=$(echo "$curl_timing" | awk '{printf "%.1f", $2 * 1000}') - ttfb_vals+=("$ext_ttfb") - ttlb_vals+=("$ext_total") - fi - - if [ -z "$header" ]; then - continue - fi - - parse_server_timing "$header" - [ -n "$st_init" ] && init_vals+=("$st_init") - [ -n "$st_backend" ] && backend_vals+=("$st_backend") - [ -n "$st_process" ] && process_vals+=("$st_process") - [ -n "$st_total" ] && total_vals+=("$st_total") - done - - echo -e " ${BOLD}$label${RESET} ($method, $iterations iterations)" - echo "" - printf " %-12s %8s %8s %8s %8s\n" "Phase" "Min" "Avg" "Max" "P95" - printf " %-12s %8s %8s %8s %8s\n" "----------" "------" "------" "------" "------" - print_stats "init" "${init_vals[@]}" - print_stats "backend" "${backend_vals[@]}" - print_stats "process" "${process_vals[@]}" - print_stats "total" "${total_vals[@]}" - echo "" - echo -e " ${BOLD}External timing (curl):${RESET}" - printf " %-12s %8s %8s %8s %8s\n" "Metric" "Min" "Avg" "Max" "P95" - printf " %-12s %8s %8s %8s %8s\n" "----------" "------" "------" "------" "------" - print_stats "TTFB" "${ttfb_vals[@]}" - print_stats "TTLB" "${ttlb_vals[@]}" - echo "" -} - -# Compute min/avg/max/p95 from a list of floats -print_stats() { - local name="$1" - shift - local vals=("$@") - local count=${#vals[@]} - - if [ "$count" -eq 0 ]; then - printf " %-12s %8s %8s %8s %8s\n" "$name" "-" "-" "-" "-" - return - fi - - # Sort values - local sorted - sorted=$(printf '%s\n' "${vals[@]}" | sort -g) - - local min avg max p95 - min=$(echo "$sorted" | head -1) - max=$(echo "$sorted" | tail -1) - - 
local sum - sum=$(printf '%s\n' "${vals[@]}" | awk '{s+=$1} END {printf "%.1f", s}') - avg=$(echo "$sum $count" | awk '{printf "%.1f", $1/$2}') - - local p95_idx - p95_idx=$(echo "$count" | awk '{printf "%d", int($1 * 0.95 + 0.5)}') - [ "$p95_idx" -lt 1 ] && p95_idx=1 - p95=$(echo "$sorted" | sed -n "${p95_idx}p") - - printf " %-12s %7.1f %7.1f %7.1f %7.1f\n" "$name" "$min" "$avg" "$max" "$p95" -} - -run_profile() { - local iterations="${1:-20}" - - log_header "SERVER-TIMING PROFILE" - log_info "Collecting Server-Timing header data over $iterations requests per endpoint" - log_info "Phases: init (setup) → backend (origin fetch) → process (body rewrite) → total" - echo "" - - profile_endpoint GET "$BASE_URL/static/tsjs=tsjs-unified.min.js" \ - "Static JS bundle" "$iterations" - - profile_endpoint GET "$BASE_URL/.well-known/trusted-server.json" \ - "Discovery endpoint" "$iterations" - - profile_endpoint GET "$BASE_URL/" \ - "Publisher proxy (fallback)" "$iterations" - - profile_endpoint POST "$BASE_URL/auction" \ - "Auction endpoint" "$iterations" \ - -H "Content-Type: application/json" \ - -d "$AUCTION_PAYLOAD" -} - save_results() { local name="$1" mkdir -p "$RESULTS_DIR" @@ -495,9 +354,6 @@ main() { --ttfb) run_first_byte_analysis ;; - --profile) - run_profile "${2:-20}" - ;; --save) save_results "${2:?Usage: --save }" ;; diff --git a/scripts/profile.sh b/scripts/profile.sh new file mode 100755 index 00000000..66cebf0d --- /dev/null +++ b/scripts/profile.sh @@ -0,0 +1,269 @@ +#!/usr/bin/env bash +# +# WASM Guest Profiler for Trusted Server +# +# Captures function-level flame graphs using Fastly's Wasmtime guest profiler. +# Samples the WASM call stack every 50us and writes a Firefox Profiler-compatible +# JSON file after the server stops. +# +# Prerequisites: +# - Fastly CLI installed: https://developer.fastly.com/learning/tools/cli +# - Rust wasm32-wasip1 target: rustup target add wasm32-wasip1 +# +# Usage: +# ./scripts/profile.sh # Profile GET / (publisher proxy) +# ./scripts/profile.sh --endpoint /auction \ +# --method POST --body '{"adUnits":[]}' # Profile specific endpoint +# ./scripts/profile.sh --requests 50 # More samples for stable flame graph +# ./scripts/profile.sh --no-build # Skip rebuild, use existing binary +# ./scripts/profile.sh --open # Auto-open Firefox Profiler (macOS) +# +# Output: +# Profile saved to benchmark-results/profiles/.json +# View: drag file onto https://profiler.firefox.com/ +# + +set -euo pipefail + +# --- Configuration --- +PROJECT_ROOT="$(cd "$(dirname "$0")/.." 
&& pwd)" +PROFILE_DIR="$PROJECT_ROOT/benchmark-results/profiles" +BASE_URL="http://127.0.0.1:7676" +SERVER_PID="" + +# Defaults +ENDPOINT="/" +METHOD="GET" +REQUESTS=20 +BODY="" +SKIP_BUILD=false +AUTO_OPEN=false + +# Colors (disabled if not a terminal) +if [ -t 1 ]; then + BOLD='\033[1m' + GREEN='\033[0;32m' + YELLOW='\033[0;33m' + RED='\033[0;31m' + CYAN='\033[0;36m' + RESET='\033[0m' +else + BOLD='' GREEN='' YELLOW='' RED='' CYAN='' RESET='' +fi + +# --- Helpers --- + +log_header() { + echo "" + echo -e "${BOLD}${CYAN}=== $1 ===${RESET}" + echo "" +} + +log_info() { + echo -e "${GREEN}[INFO]${RESET} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${RESET} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${RESET} $1" +} + +stop_server() { + # Kill the fastly CLI process if we have its PID + if [ -n "$SERVER_PID" ] && kill -0 "$SERVER_PID" 2>/dev/null; then + kill "$SERVER_PID" 2>/dev/null || true + wait "$SERVER_PID" 2>/dev/null || true + fi + # Also kill any Viceroy process still on port 7676 + # (fastly CLI spawns Viceroy as a child; killing the CLI doesn't always propagate) + local port_pids + port_pids=$(lsof -ti :7676 2>/dev/null | while read pid; do + # Only kill Viceroy processes, not unrelated listeners (e.g. Chrome) + if ps -p "$pid" -o command= 2>/dev/null | grep -q viceroy; then + echo "$pid" + fi + done) + if [ -n "$port_pids" ]; then + echo "$port_pids" | xargs kill 2>/dev/null || true + sleep 1 + fi +} + +cleanup() { + stop_server +} + +trap cleanup EXIT + +usage() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --endpoint PATH URL path to profile (default: /)" + echo " --method METHOD HTTP method (default: GET)" + echo " --body DATA Request body for POST/PUT" + echo " --requests N Number of requests to fire (default: 20)" + echo " --no-build Skip fastly compute build" + echo " --open Auto-open Firefox Profiler after capture (macOS)" + echo " --help Show this help" + exit 0 +} + +# --- Parse Arguments --- + +while [[ $# -gt 0 ]]; do + case "$1" in + --endpoint) ENDPOINT="$2"; shift 2 ;; + --method) METHOD="$2"; shift 2 ;; + --body) BODY="$2"; shift 2 ;; + --requests) REQUESTS="$2"; shift 2 ;; + --no-build) SKIP_BUILD=true; shift ;; + --open) AUTO_OPEN=true; shift ;; + --help|-h) usage ;; + *) log_error "Unknown option: $1"; usage ;; + esac +done + +# --- Main --- + +log_header "WASM GUEST PROFILER" +log_info "Endpoint: $METHOD $ENDPOINT" +log_info "Requests: $REQUESTS" + +# Step 0: Kill any existing server on the profiling port +EXISTING_PID=$(lsof -ti :7676 2>/dev/null | grep -v "^$" || true) +if [ -n "$EXISTING_PID" ]; then + log_warn "Port 7676 already in use (PID: $EXISTING_PID). Stopping existing server..." + kill $EXISTING_PID 2>/dev/null || true + sleep 1 + # Force kill if still alive + if lsof -ti :7676 &>/dev/null; then + kill -9 $(lsof -ti :7676) 2>/dev/null || true + sleep 1 + fi + log_info "Existing server stopped." +fi + +# Step 1: Build +if [ "$SKIP_BUILD" = false ]; then + log_header "BUILD" + log_info "Building WASM binary with debug symbols (release + debug=1)..." + (cd "$PROJECT_ROOT" && fastly compute build) + echo "" + log_info "Build complete." +else + log_info "Skipping build (--no-build)" +fi + +# Step 2: Start server with --profile-guest +log_header "START PROFILING SERVER" +log_info "Starting fastly compute serve --profile-guest..." + +(cd "$PROJECT_ROOT" && fastly compute serve --profile-guest 2>&1) & +SERVER_PID=$! 
+log_info "Server PID: $SERVER_PID" + +# Wait for server to be ready +log_info "Waiting for server at $BASE_URL..." +for i in $(seq 1 30); do + if curl -s -o /dev/null --max-time 2 "$BASE_URL/" 2>/dev/null; then + log_info "Server ready." + break + fi + if ! kill -0 "$SERVER_PID" 2>/dev/null; then + log_error "Server process exited unexpectedly." + exit 1 + fi + if [ "$i" -eq 30 ]; then + log_error "Server did not become ready within 30 seconds." + exit 1 + fi + sleep 1 +done + +# Step 3: Fire requests +log_header "CAPTURING PROFILE" +log_info "Firing $REQUESTS requests to $METHOD $ENDPOINT..." + +CURL_ARGS=(-s -o /dev/null -X "$METHOD") +if [ -n "$BODY" ]; then + CURL_ARGS+=(-H "Content-Type: application/json" -d "$BODY") +fi + +for i in $(seq 1 "$REQUESTS"); do + local_code=$(curl -w "%{http_code}" "${CURL_ARGS[@]}" "${BASE_URL}${ENDPOINT}" --max-time 30 2>/dev/null || echo "000") + printf "\r Request %d/%d (HTTP %s)" "$i" "$REQUESTS" "$local_code" +done +echo "" +log_info "All requests complete." + +# Step 4: Stop server (profile is written on exit) +log_header "COLLECTING PROFILE" +log_info "Stopping server to flush profile data..." + +stop_server +SERVER_PID="" + +# Step 5: Find and move profile file +# Viceroy writes profiles to guest-profiles/ directory (e.g., guest-profiles/1771483114-2.json) +# or as guest-profile-*.json in the project root depending on CLI version +mkdir -p "$PROFILE_DIR" +TIMESTAMP=$(date '+%Y%m%d-%H%M%S') + +GUEST_PROFILES_DIR="$PROJECT_ROOT/guest-profiles" +if [ -d "$GUEST_PROFILES_DIR" ]; then + # Find the most recently modified .json file in guest-profiles/ + PROFILE_FILE=$(find "$GUEST_PROFILES_DIR" -name "*.json" -newer "$0" -print 2>/dev/null | head -1 || true) + if [ -n "$PROFILE_FILE" ]; then + DEST="$PROFILE_DIR/profile-${TIMESTAMP}.json" + cp "$PROFILE_FILE" "$DEST" + fi +fi + +if [ -z "${DEST:-}" ] || [ ! -f "${DEST:-}" ]; then + # Fallback: check project root for guest-profile-*.json + PROFILE_FILE=$(find "$PROJECT_ROOT" -maxdepth 1 -name "guest-profile-*.json" -newer "$0" -print -quit 2>/dev/null || true) + if [ -n "$PROFILE_FILE" ]; then + DEST="$PROFILE_DIR/profile-${TIMESTAMP}.json" + mv "$PROFILE_FILE" "$DEST" + fi +fi + +if [ -z "${DEST:-}" ] || [ ! -f "${DEST:-}" ]; then + log_warn "No profile file found." + log_warn "Check $GUEST_PROFILES_DIR/ or $PROJECT_ROOT/ for profile output." + log_warn "The --profile-guest flag may not be supported by your Fastly CLI version." + exit 1 +fi + +FILE_SIZE=$(du -h "$DEST" | cut -f1) + +log_header "PROFILE CAPTURED" +log_info "File: $DEST" +log_info "Size: $FILE_SIZE" +log_info "Samples: ~$((REQUESTS * 20)) (estimated at 50us intervals)" +echo "" +echo -e "${BOLD}To view the flame graph:${RESET}" +echo " 1. Open https://profiler.firefox.com/" +echo " 2. Drag and drop: $DEST" +echo "" +echo -e "${BOLD}What to look for:${RESET}" +echo " - Tall stacks in GzDecoder/GzEncoder = compression overhead" +echo " - Wide bars in lol_html = HTML rewriting cost" +echo " - Time in format!/replace/to_string = string allocation churn" +echo " - Time in Settings::deserialize = init overhead" +echo "" + +# Step 6: Auto-open if requested +if [ "$AUTO_OPEN" = true ]; then + if command -v open &>/dev/null; then + log_info "Opening Firefox Profiler..." + open "https://profiler.firefox.com/" + log_info "Drag the profile file onto the page to load it." 
+ else + log_warn "--open is only supported on macOS" + fi +fi From 33b527d9eefde69022632783b1fdc89257fa35a1 Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Thu, 19 Feb 2026 19:51:11 +0530 Subject: [PATCH 5/7] Optimize get_settings() by bypassing redundant config crate at runtime MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit build.rs already resolves trusted-server.toml + env vars at compile time and embeds the result. Replace Settings::from_toml() with direct toml::from_str() to skip the config crate pipeline on every request. Profiling confirms: ~5-8% → ~3.3% CPU per request. --- OPTIMIZATION.md | 254 +++++++++++++++-------------- crates/common/Cargo.toml | 1 + crates/common/src/settings.rs | 2 +- crates/common/src/settings_data.rs | 18 +- 4 files changed, 150 insertions(+), 125 deletions(-) diff --git a/OPTIMIZATION.md b/OPTIMIZATION.md index 77e7df45..05e23247 100644 --- a/OPTIMIZATION.md +++ b/OPTIMIZATION.md @@ -1,82 +1,49 @@ # Trusted Server Optimization Plan -## Status +## Summary -| Item | Status | -|------|--------| -| Benchmark tooling (`scripts/benchmark.sh`) | **Implemented** | -| WASM guest profiling (`scripts/profile.sh`) | **Implemented** (flame graphs via `--profile-guest`) | -| Viceroy baseline measurements | **Complete** | -| Staging external TTFB baseline | **Complete** (against staging deployment) | -| Streaming architecture (`stream_to_client`) | **Planned** — see Phase 2 | -| Code-level optimizations | **Planned** — see Phase 1 | +This document presents a performance analysis and optimization plan for the Trusted Server running on Fastly Compute (WASM). WASM guest profiling reveals that **HTML processing consumes ~76% of per-request CPU** on the publisher proxy path, with the `lol_html` parser alone accounting for ~47%. The optimization strategy focuses on two phases: (1) low-risk code fixes that reduce memory waste and enable streaming, and (2) an architectural shift to `stream_to_client()` that eliminates response buffering and reduces time-to-last-byte. --- -## Key Finding: Streaming to Client IS Possible - -The Fastly Compute SDK provides `Response::stream_to_client()` which returns a `StreamingBody` handle that implements `std::io::Write`. Headers are sent immediately and body chunks stream as they're written. - -```rust -// Current: fully buffered (no bytes reach client until everything is done) -let body = response.take_body(); -let mut output = Vec::new(); -pipeline.process(body, &mut output)?; // blocks until complete -response.set_body(Body::from(output)); // only NOW does client get anything -return Ok(response); - -// Possible: streaming (headers sent immediately, body chunks as processed) -let body = response.take_body(); -let mut streaming = response.stream_to_client(); // headers sent NOW -pipeline.process(body, &mut streaming)?; // each write() → client -streaming.finish()?; -``` +## Profiling Results -This changes the optimization strategy — **time-to-last-byte (TTLB) and peak memory CAN be significantly reduced**. TTFB itself is still gated by the Fastly platform floor (~200ms) plus backend response time, but body bytes start reaching the client as soon as the first chunk is processed instead of waiting for the entire response to be buffered. +**Methodology**: WASM guest profiling via `fastly compute serve --profile-guest`, 50 requests to `GET /` (publisher proxy to golf.com, 222KB HTML). ~131 samples per request at 50μs intervals. Profiles analyzed in Firefox Profiler. 
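+
+For scale, 131 samples × 50μs ≈ 6.6 ms of sampled guest time per request. The capture can be reproduced with the profiling script added on this branch (defaults assumed):
+
+```bash
+./scripts/profile.sh --requests 50   # GET / against the local Viceroy server
+# Output: benchmark-results/profiles/profile-<timestamp>.json
+# View:   drag the file onto https://profiler.firefox.com/
+```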
-### Compatibility with `#[fastly::main]` — use undecorated `main()` (recommended) - -For streaming final responses, the Fastly SDK docs already define the intended pattern: - -- `Request::from_client()` docs explicitly state it is incompatible with `#[fastly::main]` and recommend an undecorated `main()` with explicit response sending. -- `Response::send_to_client()` / `Response::stream_to_client()` include the same compatibility guidance. -- `fastly::init()` is public (doc-hidden) and can be called from raw `main()` to initialize the ABI. - -This means approach #1 is the correct architecture for streaming paths, and approaches like `std::process::exit(0)` or sentinel responses are unnecessary. - -Recommended shape: - -```rust -fn main() -> Result<(), fastly::Error> { - fastly::init(); - let req = fastly::Request::from_client(); - - match route_request(req)? { - Some(resp) => resp.send_to_client(), // non-streaming path - None => {} // streaming path already sent + finished - } +### CPU Breakdown — Top Level - Ok(()) -} -``` +| % CPU | Function | Notes | +|-------|----------|-------| +| ~96% | `trusted_server_fastly::main` | Almost all time is in application code | +| ~90% | `route_request` → `handle_publisher_request` | Publisher proxy is the hot path | +| **~76%** | **HTML processing pipeline** (`streaming_processor` → `lol_html`) | **Dominant bottleneck** | +| ~~5-8%~~ → **3.3%** | `get_settings()` | ~~Redundant config crate parsing~~ **Fixed** — now uses `toml::from_str` | +| ~5-7% | `handle_publisher_request` (non-HTML) | Backend send, cookie handling | -**Action item**: Do a focused spike on real Fastly Compute to validate runtime behavior (no double-send panics across mixed routes, proper error behavior for partially streamed responses, and observability expectations). The API viability question is resolved. +### CPU Breakdown — HTML Processing (~76% total) -Non-streaming endpoints (static JS, discovery, auction) continue returning `Response` normally. Only the publisher proxy path (the hot path) would use streaming. +| % CPU | Function | Notes | +|-------|----------|-------| +| **~47%** | `lol_html::parser` state machine | HTML tokenizer/parser — character-by-character parsing | +| ~11% | `create_html_processor` | Building the lol_html rewriter with all handlers | +| ~18% | Processing callbacks | URL rewriting, attribute scanning, output sink handling | ---- +### CPU Breakdown — Other Components -## How to Use This Document +| % CPU | Function | Notes | +|-------|----------|-------| +| ~2% | `IntegrationRegistry` | Route lookup + attribute rewriting + initialization | +| ~0.8% | Memory allocation (`RawVec::reserve`) | Buffer growth during processing | +| ~0.5% | Logging (`fern` / `log_fastly`) | Minimal overhead | +| ~0.5% | Synthetic ID generation | HMAC computation | +| ~0.5% | Header extraction | `fastly::http::handle::get_header_values` | -**For any optimization work:** +### Key Takeaways -1. Run `./scripts/benchmark.sh --save baseline` on `main` -2. Make your change on a branch -3. Rebuild: `fastly compute build` -4. Run `./scripts/benchmark.sh --save branch-name` -5. Compare: `diff benchmark-results/baseline.txt benchmark-results/branch-name.txt` -6. For production: `BENCH_URL=https://your-staging.edgecompute.app ./scripts/benchmark.sh --profile` -7. If the numbers don't improve meaningfully, don't ship it +1. **The lol_html parser at ~47% cannot be directly optimized** — it's doing its job parsing a 222KB HTML page. 
The focus should be on reducing unnecessary work around it and enabling streaming so processed chunks reach the client sooner. +2. **`get_settings()` was ~5-8%, now ~3.3% after fix** — `build.rs` already resolves all config at compile time. Replaced `Settings::from_toml()` with direct `toml::from_str()` to eliminate redundant `config` crate pipeline. +3. **Memory allocation at ~0.8%** confirms buffer growth during processing. Fixing gzip and HTML streaming (items 1.1 + 1.2) should reduce this. +4. **Logging is negligible** at ~0.5%, but `log::info!("Settings {settings:?}")` still serializes the entire Settings struct on every request. --- @@ -107,29 +74,68 @@ Measured externally against staging deployment (golf.com proxy), `main` branch. | `GET /` (publisher proxy, golf.com) | ~234 ms | ~441 ms | 230 KB | Backend + processing | | `GET /.well-known/trusted-server.json` | ~191 ms | - | - | Returns 500 (needs investigation) | -**Key insight**: Static JS has ~204ms TTFB with zero backend work **from this specific benchmark vantage point**. That number includes client-to-edge RTT, DNS, TLS/connection state, and edge processing; it is **not** a universal Fastly floor. `WASM` instantiation can contribute on cold paths, but warm requests from clients near a POP can be much lower. +**Key insight**: Static JS has ~204ms TTFB with zero backend work **from this specific benchmark vantage point**. That number includes client-to-edge RTT, DNS, TLS/connection state, and edge processing — it is **not** a universal Fastly floor. For this dataset, treat static TTFB as an environment baseline and compare deltas: the publisher proxy adds only ~30ms TTFB on top. The larger optimization target is the TTFB→TTLB gap (~207ms here), which streaming can shrink by sending body chunks as they are processed instead of waiting for full buffering. --- -## Implementation Plan +## Key Finding: Streaming to Client IS Possible + +The Fastly Compute SDK provides `Response::stream_to_client()` which returns a `StreamingBody` handle that implements `std::io::Write`. Headers are sent immediately and body chunks stream as they're written. -### Phase 0: Tooling and Baselines (DONE) +```rust +// Current: fully buffered (no bytes reach client until everything is done) +let body = response.take_body(); +let mut output = Vec::new(); +pipeline.process(body, &mut output)?; // blocks until complete +response.set_body(Body::from(output)); // only NOW does client get anything +return Ok(response); + +// Possible: streaming (headers sent immediately, body chunks as processed) +let body = response.take_body(); +let mut streaming = response.stream_to_client(); // headers sent NOW +pipeline.process(body, &mut streaming)?; // each write() → client +streaming.finish()?; +``` -**Branch**: `feat/optimize-ts` +This changes the optimization strategy — **time-to-last-byte (TTLB) and peak memory CAN be significantly reduced**. TTFB itself is still gated by the Fastly platform floor plus backend response time, but body bytes start reaching the client as soon as the first chunk is processed instead of waiting for the entire response to be buffered. 
-Completed: -- `scripts/benchmark.sh` — HTTP load testing with TTFB analysis, cold start detection, endpoint latency breakdown -- `scripts/profile.sh` — WASM guest profiling via `fastly compute serve --profile-guest`, outputs Firefox Profiler-compatible flame graphs -- Viceroy baseline measurements (see tables above) -- Staging external TTFB baseline +### Compatibility with `#[fastly::main]` — use undecorated `main()` (recommended) + +For streaming final responses, the Fastly SDK docs define the intended pattern: + +- `Request::from_client()` docs explicitly state it is incompatible with `#[fastly::main]` and recommend an undecorated `main()` with explicit response sending. +- `Response::send_to_client()` / `Response::stream_to_client()` include the same compatibility guidance. +- `fastly::init()` is public (doc-hidden) and can be called from raw `main()` to initialize the ABI. + +Recommended shape: + +```rust +fn main() -> Result<(), fastly::Error> { + fastly::init(); + let req = fastly::Request::from_client(); + + match route_request(req)? { + Some(resp) => resp.send_to_client(), // non-streaming path + None => {} // streaming path already sent + finished + } + + Ok(()) +} +``` + +**Action item**: Do a focused spike on real Fastly Compute to validate runtime behavior (no double-send panics across mixed routes, proper error behavior for partially streamed responses, and observability expectations). + +Non-streaming endpoints (static JS, discovery, auction) continue returning `Response` normally. Only the publisher proxy path (the hot path) would use streaming. --- +## Implementation Plan + ### Phase 1: Low-Risk Code Optimizations -These are small, safe changes that reduce CPU and memory waste. Ship as one PR, measure before/after. +Small, safe changes that reduce CPU and memory waste. Ship as one PR, measure before/after. #### 1.1 Fix gzip streaming — remove full-body buffering @@ -195,39 +201,18 @@ impl StreamProcessor for HtmlRewriterAdapter { |--------|-----|------| | **High** (HTML is most common content type; eliminates 222KB+ buffer) | ~30 refactored | Medium — needs test coverage | -#### 1.3 Reduce verbose per-request logging - -**Files**: `crates/fastly/src/main.rs:37,64-67,152-177` - -**Problem**: `log::info!("Settings {settings:?}")` serializes the entire Settings struct (~2KB) on every request. `FASTLY_SERVICE_VERSION` env var logged at info level. The logger is configured with `max_level(LevelFilter::Debug)`, meaning every `debug!` and above is evaluated. - -**Fix**: Downgrade the Settings dump to `log::debug!` and tighten the logger's `max_level` to `LevelFilter::Info` for production. The `log_fastly` crate supports `filter_module()` for per-module levels if we still want debug output from specific modules. When the level is filtered, `log` macros short-circuit before evaluating arguments — so the `Settings` `Debug` format is never even computed. 
- -```rust -// Before: everything at Debug and above is serialized -.max_level(log::LevelFilter::Debug) - -// After: Info in production, debug only for specific modules if needed -.max_level(log::LevelFilter::Info) -// Optional: .filter_module("trusted_server", log::LevelFilter::Debug) -``` - -| Impact | LOC | Risk | -|--------|-----|------| -| Medium-High | ~3 | None | - -#### 1.4 Eliminate redundant `config` crate parsing in `get_settings()` — **22% CPU** +#### 1.3 ~~Eliminate redundant `config` crate parsing in `get_settings()` — ~5-8% CPU~~ DONE (~3.3% post-fix) **Files**: `crates/common/src/settings_data.rs`, `crates/common/src/settings.rs` -**Problem**: Flame graph profiling shows `get_settings()` consuming ~22% of per-request CPU. The `build.rs` already merges `trusted-server.toml` + all `TRUSTED_SERVER__*` env vars at compile time and writes a fully-resolved TOML file to `target/trusted-server-out.toml`. But at runtime, `get_settings()` calls `Settings::from_toml()`, which re-runs the entire `config` crate pipeline — `Config::builder().add_source(File).add_source(Environment).build().try_deserialize()` — redundantly scanning env vars and merging sources that were already resolved at build time. +**Problem**: Profiling shows `get_settings()` consuming ~5-8% of per-request CPU. The `build.rs` already merges `trusted-server.toml` + all `TRUSTED_SERVER__*` env vars at compile time and writes a fully-resolved TOML file to `target/trusted-server-out.toml`. But at runtime, `get_settings()` calls `Settings::from_toml()`, which re-runs the entire `config` crate pipeline — `Config::builder().add_source(File).add_source(Environment).build().try_deserialize()` — redundantly scanning env vars and merging sources that were already resolved at build time. **Root cause**: `settings_data.rs` embeds the build-time-resolved TOML via `include_bytes!`, then hands it to `from_toml()` which treats it as a raw config source and re-layers env vars on top. **Fix**: Replace `Settings::from_toml()` with direct `toml::from_str()` in `get_settings()`. The embedded TOML is already fully resolved — no `config` crate needed at runtime. ```rust -// Before (22% CPU — re-runs config crate pipeline + env var scan) +// Before (~5-8% CPU — re-runs config crate pipeline + env var scan) let settings = Settings::from_toml(toml_str)?; // After (near-instant — just TOML deserialization) @@ -253,7 +238,29 @@ let settings: Settings = postcard::from_bytes(SETTINGS_DATA) | Impact | LOC | Risk | |--------|-----|------| -| **Very High** (~22% CPU eliminated) | 1-3 | Low — `build.rs` already resolves everything | +| **Medium** (~5-8% → ~3.3% CPU, verified) | 1-3 | Low — `build.rs` already resolves everything | + +**Status**: Done. Replaced `Settings::from_toml()` with `toml::from_str()` + explicit `normalize()` + `validate()`. Profiling confirmed: **~5-8% → ~3.3% CPU per request**. + +#### 1.4 Reduce verbose per-request logging — ~0.5% CPU + +**Files**: `crates/fastly/src/main.rs:37,64-67,152-177` + +**Problem**: `log::info!("Settings {settings:?}")` serializes the entire Settings struct (~2KB) on every request. `FASTLY_SERVICE_VERSION` env var logged at info level. The logger is configured with `max_level(LevelFilter::Debug)`, meaning every `debug!` and above is evaluated. + +**Fix**: Downgrade the Settings dump to `log::debug!` and tighten the logger's `max_level` to `LevelFilter::Info` for production. 
When the level is filtered, `log` macros short-circuit before evaluating arguments — so the `Settings` `Debug` format is never even computed. + +```rust +// Before: everything at Debug and above is serialized +.max_level(log::LevelFilter::Debug) + +// After: Info in production, debug only for specific modules if needed +.max_level(log::LevelFilter::Info) +``` + +| Impact | LOC | Risk | +|--------|-----|------| +| Low (~0.5% CPU) | ~3 | None | #### 1.5 Trivial fixes batch @@ -271,7 +278,7 @@ let settings: Settings = postcard::from_bytes(SETTINGS_DATA) ### Phase 2: Streaming Response Architecture -This is the high-impact architectural change. Uses Fastly's `stream_to_client()` API to send response headers and body chunks to the client as they're processed, instead of buffering everything. +The high-impact architectural change. Uses Fastly's `stream_to_client()` API to send response headers and body chunks to the client as they're processed, instead of buffering everything. #### 2.1 Publisher proxy: `stream_to_client()` integration @@ -344,7 +351,7 @@ The idea: use `req.send_async()` to launch the origin fetch concurrently with au This would overlap origin fetch time with auction execution, so the browser starts receiving `` content (CSS, fonts) while the auction is still running. -**Note**: This requires significant refactoring of the auction orchestrator and HTML processor to support async injection. The pseudo-code in the teammate's proposal (`origin_pending.poll()`, `run_auction_async`) represents the desired architecture but these APIs don't exist yet and would need to be built. +**Note**: This requires significant refactoring of the auction orchestrator and HTML processor to support async injection. | Impact | LOC | Risk | |--------|-----|------| @@ -373,16 +380,15 @@ After implementing Phases 1-2: ## Optimization Summary Table -| # | Optimization | Impact | LOC | Risk | Phase | -|---|---|---|---|---|---| -| **P0** | Tooling and baselines | Prerequisite | Done | None | 0 | -| **1.1** | Gzip streaming fix | **High** (memory) | -15/+3 | Low | 1 | -| **1.2** | HTML rewriter streaming | **High** (memory) | ~30 | Medium | 1 | -| **1.3** | Remove verbose logging | Medium-High | ~3 | None | 1 | -| **1.4** | Eliminate redundant `config` crate in `get_settings()` | **Very High** (~22% CPU) | 1-3 | Low | 1 | -| **1.5** | Trivial fixes batch | Low-Medium | ~50 | None | 1 | -| **2.1** | `stream_to_client()` integration | **High** (TTLB) | ~80-120 | Medium | 2 | -| **2.2** | Concurrent origin + auction | **Very High** | ~150-200 | High | 2 (future) | +| # | Optimization | Measured CPU | Impact | LOC | Risk | Phase | +|---|---|---|---|---|---|---| +| **1.1** | Gzip streaming fix | Part of ~76% HTML pipeline | **High** (memory) | -15/+3 | Low | 1 | +| **1.2** | HTML rewriter streaming | Part of ~76% HTML pipeline | **High** (memory) | ~30 | Medium | 1 | +| **1.3** | ~~Eliminate redundant `config` crate~~ | ~~5-8%~~ → **3.3%** | **Done** | 1-3 | Low | 1 | +| **1.4** | Reduce verbose logging | ~0.5% | Low | ~3 | None | 1 | +| **1.5** | Trivial fixes batch | <1% combined | Low | ~50 | None | 1 | +| **2.1** | `stream_to_client()` integration | N/A (architectural) | **High** (TTLB) | ~80-120 | Medium | 2 | +| **2.2** | Concurrent origin + auction | N/A (architectural) | **Very High** | ~150-200 | High | 2 (future) | --- @@ -423,8 +429,8 @@ Client → Fastly Edge → [WASM starts] → StreamingBody.finish() done ``` -**Memory**: ~8KB chunk buffer + lol_html internal state (significantly less than 4x response size 
— exact savings need measurement) -**TTLB**: client receives first body bytes after first processed chunk, instead of waiting for all processing to complete. For a 222KB page, the savings is the entire processing time (decompression + rewriting + recompression). +**Memory**: ~8KB chunk buffer + lol_html internal state (significantly less than 4x response size) +**TTLB**: client receives first body bytes after first processed chunk, instead of waiting for all processing to complete --- @@ -450,7 +456,7 @@ brew install hey # HTTP load testing tool (auto-installed by benchmark.sh) ### WASM Guest Profiling (Flame Graphs) -`fastly compute serve --profile-guest` samples the WASM call stack every 50us and writes a Firefox Profiler-compatible JSON on exit. This shows exactly which Rust functions consume CPU time — compression, HTML rewriting, string operations, init, etc. +`fastly compute serve --profile-guest` samples the WASM call stack every 50μs and writes a Firefox Profiler-compatible JSON on exit. This shows exactly which Rust functions consume CPU time — compression, HTML rewriting, string operations, init, etc. ```bash ./scripts/profile.sh # Profile GET / (publisher proxy) @@ -486,15 +492,21 @@ The script builds, starts the profiling server, fires requests, stops the server --- -## Notes for Team +## How to Use This Document -### What's already on `feat/optimize-ts` branch (uncommitted) +**For any optimization work:** -| File | Change | -|------|--------| -| `scripts/benchmark.sh` | HTTP load testing, TTFB analysis, cold start detection, auto-install `hey` | -| `scripts/profile.sh` | WASM guest profiling via `--profile-guest`, flame graph workflow | -| `OPTIMIZATION.md` | This document | +1. Run `./scripts/benchmark.sh --save baseline` on `main` +2. Make your change on a branch +3. Rebuild: `fastly compute build` +4. Run `./scripts/benchmark.sh --save branch-name` +5. Compare: `diff benchmark-results/baseline.txt benchmark-results/branch-name.txt` +6. Run `./scripts/profile.sh` for flame graph comparison +7. If the numbers don't improve meaningfully, don't ship it + +--- + +## Notes for Team ### Teammate's `streaming_processor.rs` Changes diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 17a0e04b..8554d654 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -42,6 +42,7 @@ serde_json = { workspace = true } sha2 = { workspace = true } tokio = { workspace = true } trusted-server-js = { path = "../js" } +toml = { workspace = true } url = { workspace = true } urlencoding = { workspace = true } uuid = { workspace = true } diff --git a/crates/common/src/settings.rs b/crates/common/src/settings.rs index 5963b602..69d7df91 100644 --- a/crates/common/src/settings.rs +++ b/crates/common/src/settings.rs @@ -56,7 +56,7 @@ impl Publisher { .unwrap_or_else(|| self.origin_url.clone()) } - fn normalize(&mut self) { + pub(crate) fn normalize(&mut self) { let trimmed = self.origin_url.trim_end_matches('/'); if trimmed != self.origin_url { log::warn!( diff --git a/crates/common/src/settings_data.rs b/crates/common/src/settings_data.rs index 01967add..167208ab 100644 --- a/crates/common/src/settings_data.rs +++ b/crates/common/src/settings_data.rs @@ -10,8 +10,15 @@ pub use crate::auction_config_types::AuctionConfig; const SETTINGS_DATA: &[u8] = include_bytes!("../../../target/trusted-server-out.toml"); /// Creates a new [`Settings`] instance from the embedded configuration file. 
-/// Loads the configuration from the embedded `trusted-server.toml` file -/// and applies any environment variable overrides. +/// +/// Deserializes directly via `toml::from_str` instead of [`Settings::from_toml`], +/// which runs the full `config` crate pipeline (env var scanning, source merging). +/// +/// This is safe because `build.rs` already calls `Settings::from_toml()` at compile +/// time — merging `trusted-server.toml` with all `TRUSTED_SERVER__*` env vars — and +/// writes the fully-resolved result to `target/trusted-server-out.toml`. The embedded +/// bytes are that resolved output, so re-scanning env vars at runtime is redundant. +/// See `build.rs::merge_toml()` and the `cargo:rerun-if-env-changed` directives. /// /// # Errors /// @@ -23,7 +30,12 @@ pub fn get_settings() -> Result> { message: "embedded trusted-server.toml file".to_string(), })?; - let settings = Settings::from_toml(toml_str)?; + let mut settings: Settings = + toml::from_str(toml_str).change_context(TrustedServerError::Configuration { + message: "Failed to deserialize embedded config".to_string(), + })?; + + settings.publisher.normalize(); // Validate the settings settings From e49f9f7a4621c2728c88ff954d24b685d0279776 Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Thu, 19 Feb 2026 20:01:48 +0530 Subject: [PATCH 6/7] Removed blank line --- crates/common/src/publisher.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/common/src/publisher.rs b/crates/common/src/publisher.rs index 0794f066..fb160f3d 100644 --- a/crates/common/src/publisher.rs +++ b/crates/common/src/publisher.rs @@ -9,7 +9,6 @@ use crate::constants::{COOKIE_SYNTHETIC_ID, HEADER_X_COMPRESS_HINT, HEADER_X_SYN use crate::cookies::create_synthetic_cookie; use crate::error::TrustedServerError; use crate::integrations::IntegrationRegistry; - use crate::rsc_flight::RscFlightUrlRewriter; use crate::settings::Settings; use crate::streaming_processor::{Compression, PipelineConfig, StreamProcessor, StreamingPipeline}; From a7b5a307a11d2adcb77eaea402ba3a269d28d160 Mon Sep 17 00:00:00 2001 From: prk-Jr Date: Thu, 19 Feb 2026 23:16:59 +0530 Subject: [PATCH 7/7] Add optimization plan, WASM profiling, and benchmark tooling - OPTIMIZATION.md: profiling results, CPU breakdown, phased optimization plan covering streaming fixes, config crate elimination, and stream_to_client() architecture - scripts/profile.sh: WASM guest profiling via --profile-guest with Firefox Profiler-compatible output - scripts/benchmark.sh: TTFB analysis, cold start detection, endpoint latency breakdown, and load testing with save/compare support --- crates/common/Cargo.toml | 1 - crates/common/src/lib.rs | 1 - crates/common/src/settings.rs | 2 +- crates/common/src/settings_data.rs | 18 +++--------------- 4 files changed, 4 insertions(+), 18 deletions(-) diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 8554d654..17a0e04b 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -42,7 +42,6 @@ serde_json = { workspace = true } sha2 = { workspace = true } tokio = { workspace = true } trusted-server-js = { path = "../js" } -toml = { workspace = true } url = { workspace = true } urlencoding = { workspace = true } uuid = { workspace = true } diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index fd6fe5e1..a01865f6 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -51,7 +51,6 @@ pub mod openrtb; pub mod proxy; pub mod publisher; pub mod request_signing; - pub mod rsc_flight; pub mod settings; pub mod settings_data; diff 
--git a/crates/common/src/settings.rs b/crates/common/src/settings.rs index 69d7df91..5963b602 100644 --- a/crates/common/src/settings.rs +++ b/crates/common/src/settings.rs @@ -56,7 +56,7 @@ impl Publisher { .unwrap_or_else(|| self.origin_url.clone()) } - pub(crate) fn normalize(&mut self) { + fn normalize(&mut self) { let trimmed = self.origin_url.trim_end_matches('/'); if trimmed != self.origin_url { log::warn!( diff --git a/crates/common/src/settings_data.rs b/crates/common/src/settings_data.rs index 167208ab..01967add 100644 --- a/crates/common/src/settings_data.rs +++ b/crates/common/src/settings_data.rs @@ -10,15 +10,8 @@ pub use crate::auction_config_types::AuctionConfig; const SETTINGS_DATA: &[u8] = include_bytes!("../../../target/trusted-server-out.toml"); /// Creates a new [`Settings`] instance from the embedded configuration file. -/// -/// Deserializes directly via `toml::from_str` instead of [`Settings::from_toml`], -/// which runs the full `config` crate pipeline (env var scanning, source merging). -/// -/// This is safe because `build.rs` already calls `Settings::from_toml()` at compile -/// time — merging `trusted-server.toml` with all `TRUSTED_SERVER__*` env vars — and -/// writes the fully-resolved result to `target/trusted-server-out.toml`. The embedded -/// bytes are that resolved output, so re-scanning env vars at runtime is redundant. -/// See `build.rs::merge_toml()` and the `cargo:rerun-if-env-changed` directives. +/// Loads the configuration from the embedded `trusted-server.toml` file +/// and applies any environment variable overrides. /// /// # Errors /// @@ -30,12 +23,7 @@ pub fn get_settings() -> Result> { message: "embedded trusted-server.toml file".to_string(), })?; - let mut settings: Settings = - toml::from_str(toml_str).change_context(TrustedServerError::Configuration { - message: "Failed to deserialize embedded config".to_string(), - })?; - - settings.publisher.normalize(); + let settings = Settings::from_toml(toml_str)?; // Validate the settings settings