+ * Deprecated: The TRACE ON/OFF SQL mechanism is a legacy anti-pattern from the HTrace era. + * With OpenTelemetry, tracing is always-on and controlled by sampling at the infrastructure level + * (via {@code OTEL_TRACES_SAMPLER}), not per-connection via SQL commands. Users should use the + * OpenTelemetry Java Agent for automatic tracing instead. + *
+ *+ * For backward compatibility, {@code TRACE ON} is now a no-op that returns the current trace ID if + * an active OTel span exists (e.g., from the Java Agent), or 0 if no span is active. + * {@code TRACE OFF} is likewise a no-op with the same return behavior: the current trace ID if an + * active span exists, or 0 otherwise. No spans are created or stored on the + * connection. + *
+ * @deprecated Use the OpenTelemetry Java Agent for automatic tracing. TRACE ON/OFF are no-ops. + */ +@Deprecated public class TraceQueryPlan implements QueryPlan { + private static final Logger LOG = LoggerFactory.getLogger(TraceQueryPlan.class); + + /** + * Log the deprecation warning at most once per JVM to avoid log spam. + */ + private static volatile boolean deprecationWarningLogged = false; + private TraceStatement traceStatement = null; private PhoenixStatement stmt = null; private StatementContext context = null; @@ -123,11 +147,8 @@ public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throw @Override public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - final PhoenixConnection conn = stmt.getConnection(); - if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) { - return ResultIterator.EMPTY_ITERATOR; - } - return new TraceQueryResultIterator(conn); + logDeprecationWarning(); + return new TraceQueryResultIterator(); } @Override @@ -240,13 +261,22 @@ public boolean isApplicable() { return true; } - private class TraceQueryResultIterator implements ResultIterator { - - private final PhoenixConnection conn; - - public TraceQueryResultIterator(PhoenixConnection conn) { - this.conn = conn; + private static void logDeprecationWarning() { + if (!deprecationWarningLogged) { + deprecationWarningLogged = true; + LOG.warn("TRACE ON/OFF SQL commands are deprecated and are " + + "now no-ops. Tracing is automatically handled by the " + + "OpenTelemetry Java Agent. Configure sampling via " + + "OTEL_TRACES_SAMPLER environment variable. " + + "See https://phoenix.apache.org/tracing.html " + "for details."); } + } + + /** + * Result iterator that returns the current OTel trace ID (if any active span exists) without + * creating or managing any spans. This is a backward-compatible no-op. 
+ */ + private class TraceQueryResultIterator implements ResultIterator { @Override public void close() throws SQLException { @@ -254,30 +284,32 @@ public void close() throws SQLException { @Override public Tuple next() throws SQLException { - if (!first) return null; - TraceScope traceScope = conn.getTraceScope(); - if (traceStatement.isTraceOn()) { - conn.setSampler(Tracing.getConfiguredSampler(traceStatement)); - if (conn.getSampler() == Sampler.NEVER) { - closeTraceScope(conn); - } - if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) { - traceScope = Tracing.startNewSpan(conn, "Enabling trace"); - if (traceScope.getSpan() != null) { - conn.setTraceScope(traceScope); - } else { - closeTraceScope(conn); - } - } - } else { - closeTraceScope(conn); - conn.setSampler(Sampler.NEVER); + if (!first) { + return null; } - if (traceScope == null || traceScope.getSpan() == null) return null; first = false; + + // Read the current span from OTel context (e.g., set by + // the Java Agent). We never create or store spans — just + // observe what's already there. + Span currentSpan = Span.current(); + SpanContext spanContext = currentSpan.getSpanContext(); + + long traceIdLong = 0L; + if (spanContext.isValid()) { + traceIdLong = parseTraceIdAsLong(spanContext.getTraceId()); + if (traceStatement.isTraceOn()) { + LOG.info("TRACE ON (no-op): active OTel trace ID = {}", spanContext.getTraceId()); + } else { + LOG.info("TRACE OFF (no-op): active OTel trace ID = {}", spanContext.getTraceId()); + } + } + + // Return the trace ID to the client for backward compat. + // Returns 0 if no active span exists. 
ImmutableBytesWritable ptr = new ImmutableBytesWritable(); ParseNodeFactory factory = new ParseNodeFactory(); - LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId()); + LiteralParseNode literal = factory.literal(traceIdLong); LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS); expression.evaluate(null, ptr); @@ -290,11 +322,18 @@ public Tuple next() throws SQLException { return new ResultTuple(Result.create(cells)); } - private void closeTraceScope(final PhoenixConnection conn) { - if (conn.getTraceScope() != null) { - conn.getTraceScope().close(); - conn.setTraceScope(null); + /** + * Parse the last 16 hex characters of an OTel trace ID as a long. OTel trace IDs are + * 32-character hex strings (128 bits). We take the lower 64 bits for backward compatibility + * with the old HTrace long trace IDs. + */ + private long parseTraceIdAsLong(String traceId) { + if (traceId == null || traceId.length() < 16) { + return 0L; } + // Take the last 16 hex chars (lower 64 bits) + String lower64 = traceId.substring(traceId.length() - 16); + return Long.parseUnsignedLong(lower64, 16); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java index e0dbc5afad9..5ba4b8a309d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java @@ -17,6 +17,8 @@ */ package org.apache.phoenix.execute; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -33,7 +35,6 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; -import org.apache.htrace.TraceScope; 
import org.apache.phoenix.cache.ServerCacheClient.ServerCache; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; @@ -72,8 +73,8 @@ import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.trace.TracingIterator; -import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.LogUtil; @@ -364,10 +365,11 @@ public final ResultIterator iterator(final Map
- * Just a copy of the same from Hadoop, but exposed for usage.
- */
-public class MetricsInfoImpl implements MetricsInfo {
- private final String name, description;
-
- MetricsInfoImpl(String name, String description) {
- this.name = checkNotNull(name, "name");
- this.description = checkNotNull(description, "description");
- }
-
- @Override
- public String name() {
- return name;
- }
-
- @Override
- public String description() {
- return description;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof MetricsInfo) {
- MetricsInfo other = (MetricsInfo) obj;
- return Objects.equal(name, other.name()) && Objects.equal(description, other.description());
- }
- return false;
- }
-
- @Override
- public int hashCode() {
- return Objects.hashCode(name, description);
- }
-
- @Override
- public String toString() {
- return MoreObjects.toStringHelper(this).add("name", name).add("description", description)
- .toString();
- }
-}
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
deleted file mode 100644
index 1239514f702..00000000000
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION;
-import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION;
-import static org.apache.phoenix.metrics.MetricInfo.END;
-import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
-import static org.apache.phoenix.metrics.MetricInfo.PARENT;
-import static org.apache.phoenix.metrics.MetricInfo.SPAN;
-import static org.apache.phoenix.metrics.MetricInfo.START;
-import static org.apache.phoenix.metrics.MetricInfo.TAG;
-import static org.apache.phoenix.metrics.MetricInfo.TRACE;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricsRecord;
-import org.apache.hadoop.metrics2.MetricsSink;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.phoenix.compile.MutationPlan;
-import org.apache.phoenix.execute.MutationState;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
-import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.Metrics;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.trace.util.Tracing;
-import org.apache.phoenix.util.QueryUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.phoenix.thirdparty.com.google.common.base.Joiner;
-
-/**
- * Write the metrics to a phoenix table. Generally, this class is instantiated via hadoop-metrics2
- * property files. Specifically, you would create this class by adding the following to by This
- * would actually be set as:
- * [prefix].sink.[some instance name].class=org.apache.phoenix.trace.PhoenixMetricsSink
- * , where prefix is either:
- *