Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 10 additions & 6 deletions bin/hadoop-metrics2-hbase.properties
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,12 @@
# Configuration for the metrics2 system for the HBase RegionServers
# to enable phoenix trace collection on the HBase servers.
#
# NOTE: The legacy PhoenixMetricsSink has been removed as part of the
# migration from HTrace to OpenTelemetry. Trace export is now handled
# by the OpenTelemetry Java Agent. Configure the agent via environment
# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to
# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend.
#
# See hadoop-metrics2-phoenix.properties for how these configurations
# are utilized.
#
Expand All @@ -28,9 +34,7 @@
# properties should be added to the file of the same name on
# the HBase classpath (likely in the HBase conf/ folder)

# ensure that we receive traces on the server
hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# Tell the sink where to write the metrics
hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# Only handle traces with a context of "tracing"
hbase.sink.tracing.context=tracing
# Legacy PhoenixMetricsSink configuration (removed - use OpenTelemetry agent instead):
# hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# hbase.sink.tracing.context=tracing
33 changes: 11 additions & 22 deletions bin/hadoop-metrics2-phoenix.properties
Original file line number Diff line number Diff line change
Expand Up @@ -37,21 +37,6 @@
# not zero-length). It is only there to differentiate the properties that are stored for
# objects of the same type (e.g. differentiating between two phoenix.sink objects).
#
#You could the following lines in your config
#
# phoenix.sink.thingA.class=com.your-company.SpecialSink
# phoenix.sink.thingA.option1=value1
#
# and also
#
# phoenix.sink.thingB.class=org.apache.phoenix.trace.PhoenixMetricsSink
# phoenix.sink.thingB.doGoodStuff=true
#
# which will create both SpecialSink and PhoenixMetricsSink and register them
# as a MetricsSink, but Special sink will only see option1=value1 in its
# configuration, which similarly, the instantiated PhoenixMetricsSink will
# only see doGoodStuff=true in its configuration
#
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for detail

# Uncomment to NOT start MBeans
Expand All @@ -60,11 +45,15 @@
# Sample from all the sources every 10 seconds
*.period=10

# Write Traces to Phoenix
# Write Traces to Phoenix (LEGACY - removed)
##########################
# ensure that we receive traces on the server
phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# Tell the sink where to write the metrics
phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# Only handle traces with a context of "tracing"
phoenix.sink.tracing.context=tracing
# NOTE: The legacy PhoenixMetricsSink has been removed as part of the
# migration from HTrace to OpenTelemetry. Trace export is now handled
# by the OpenTelemetry Java Agent. Configure the agent via environment
# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to
# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend.
#
# Legacy configuration (removed - use OpenTelemetry agent instead):
# phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
# phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter
# phoenix.sink.tracing.context=tracing
5 changes: 3 additions & 2 deletions phoenix-client-parent/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -137,8 +137,6 @@
<shadedPattern>${shaded.package}.org.</shadedPattern>
<excludes>
<exclude>org/apache/hadoop/**</exclude>
<!-- Our non-shaded htrace and logging libraries -->
<exclude>org/apache/htrace/**</exclude>
<exclude>org/slf4j/**</exclude>
<exclude>org/apache/commons/logging/**</exclude>
<exclude>org/apache/log4j/**</exclude>
Expand Down Expand Up @@ -183,6 +181,9 @@
<exclude>io/skip/checksum/errors</exclude>
<exclude>io/sort/*</exclude>
<exclude>io/serializations</exclude>
<!-- OpenTelemetry API must NOT be shaded so that Phoenix uses the same
GlobalOpenTelemetry class as the Java Agent. See PHOENIX-5215. -->
<exclude>io/opentelemetry/**</exclude>
</excludes>
</relocation>
<!-- JSRs that haven't made it to inclusion in J2SE -->
Expand Down
21 changes: 17 additions & 4 deletions phoenix-core-client/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -194,10 +194,6 @@
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
Expand Down Expand Up @@ -250,6 +246,23 @@
<groupId>org.hdrhistogram</groupId>
<artifactId>HdrHistogram</artifactId>
</dependency>

<!-- OpenTelemetry API (versions managed by opentelemetry-bom in parent).
Scope is "provided" because at runtime the OTel API classes are already on the
HBase classpath (HBase depends on opentelemetry-api). The Java Agent provides
the SDK implementation. Phoenix must NOT shade these classes. See PHOENIX-5215. -->
<dependency>
<groupId>io.opentelemetry</groupId>
<artifactId>opentelemetry-api</artifactId>
<scope>provided</scope>
</dependency>
<!-- OpenTelemetry Context (transitive from opentelemetry-api, but declared
explicitly to satisfy dependency analysis). -->
<dependency>
<groupId>io.opentelemetry</groupId>
<artifactId>opentelemetry-context</artifactId>
<scope>provided</scope>
</dependency>
</dependencies>

<build>
Expand Down
4 changes: 2 additions & 2 deletions phoenix-core-client/src/main/antlr3/PhoenixSQL.g
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ import org.apache.phoenix.schema.types.PUnsignedTime;
import org.apache.phoenix.schema.types.PUnsignedTimestamp;
import org.apache.phoenix.util.SchemaUtil;
import org.apache.phoenix.parse.LikeParseNode.LikeType;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.trace.PhoenixTracing;
import org.apache.phoenix.parse.AddJarsStatement;
import org.apache.phoenix.parse.ExplainType;
}
Expand Down Expand Up @@ -724,7 +724,7 @@ alter_index_node returns [AlterIndexStatement ret]
// Parse a trace statement.
trace_node returns [TraceStatement ret]
: TRACE ((flag = ON ( WITH SAMPLING s = sampling_rate)?) | flag = OFF)
{ret = factory.trace(Tracing.isTraceOn(flag.getText()), s == null ? Tracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());}
{ret = factory.trace(PhoenixTracing.isTraceOn(flag.getText()), s == null ? PhoenixTracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());}
;

// Parse a create function statement.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
*/
package org.apache.phoenix.compile;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.SpanContext;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
Expand All @@ -28,8 +30,6 @@
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.htrace.Sampler;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
Expand All @@ -41,7 +41,6 @@
import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
import org.apache.phoenix.metrics.MetricInfo;
Expand All @@ -61,14 +60,39 @@
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.EnvironmentEdgeManager;
import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.apache.phoenix.util.SizedUtil;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Query plan for the {@code TRACE ON} / {@code TRACE OFF} SQL commands.
* <p>
* <b>Deprecated:</b> The TRACE ON/OFF SQL mechanism is a legacy anti-pattern from the HTrace era.
* With OpenTelemetry, tracing is always-on and controlled by sampling at the infrastructure level
* (via {@code OTEL_TRACES_SAMPLER}), not per-connection via SQL commands. Users should use the
* OpenTelemetry Java Agent for automatic tracing instead.
* </p>
* <p>
* For backward compatibility, {@code TRACE ON} is now a no-op that returns the current trace ID if
* an active OTel span exists (e.g., from the Java Agent), or 0 if no span is active.
* {@code TRACE OFF} is also a no-op that returns 0. No spans are created or stored on the
* connection.
* </p>
* @deprecated Use the OpenTelemetry Java Agent for automatic tracing. TRACE ON/OFF are no-ops.
*/
@Deprecated
public class TraceQueryPlan implements QueryPlan {

private static final Logger LOG = LoggerFactory.getLogger(TraceQueryPlan.class);

/**
* Log the deprecation warning at most once per JVM to avoid log spam.
*/
private static volatile boolean deprecationWarningLogged = false;

private TraceStatement traceStatement = null;
private PhoenixStatement stmt = null;
private StatementContext context = null;
Expand Down Expand Up @@ -123,11 +147,8 @@ public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throw

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
final PhoenixConnection conn = stmt.getConnection();
if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
return ResultIterator.EMPTY_ITERATOR;
}
return new TraceQueryResultIterator(conn);
logDeprecationWarning();
return new TraceQueryResultIterator();
}

@Override
Expand Down Expand Up @@ -240,44 +261,55 @@ public boolean isApplicable() {
return true;
}

private class TraceQueryResultIterator implements ResultIterator {

private final PhoenixConnection conn;

public TraceQueryResultIterator(PhoenixConnection conn) {
this.conn = conn;
/**
 * Emits the TRACE ON/OFF deprecation warning, at most approximately once per JVM.
 * <p>
 * NOTE(review): the check-then-set on the volatile {@code deprecationWarningLogged} flag is
 * not atomic, so two racing threads could each log the warning once. This is a benign race —
 * the worst case is a duplicate log line — and avoids any synchronization cost on the hot path.
 */
private static void logDeprecationWarning() {
if (!deprecationWarningLogged) {
deprecationWarningLogged = true;
// Point users at the OpenTelemetry Java Agent / OTEL_TRACES_SAMPLER replacement.
LOG.warn("TRACE ON/OFF SQL commands are deprecated and are "
+ "now no-ops. Tracing is automatically handled by the "
+ "OpenTelemetry Java Agent. Configure sampling via "
+ "OTEL_TRACES_SAMPLER environment variable. "
+ "See https://phoenix.apache.org/tracing.html " + "for details.");
}
}

/**
* Result iterator that returns the current OTel trace ID (if any active span exists) without
* creating or managing any spans. This is a backward-compatible no-op.
*/
private class TraceQueryResultIterator implements ResultIterator {

// No-op: this iterator holds no spans, scopes, or other resources to release.
@Override
public void close() throws SQLException {
}

@Override
public Tuple next() throws SQLException {
if (!first) return null;
TraceScope traceScope = conn.getTraceScope();
if (traceStatement.isTraceOn()) {
conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
if (conn.getSampler() == Sampler.NEVER) {
closeTraceScope(conn);
}
if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
traceScope = Tracing.startNewSpan(conn, "Enabling trace");
if (traceScope.getSpan() != null) {
conn.setTraceScope(traceScope);
} else {
closeTraceScope(conn);
}
}
} else {
closeTraceScope(conn);
conn.setSampler(Sampler.NEVER);
if (!first) {
return null;
}
if (traceScope == null || traceScope.getSpan() == null) return null;
first = false;

// Read the current span from OTel context (e.g., set by
// the Java Agent). We never create or store spans — just
// observe what's already there.
Span currentSpan = Span.current();
SpanContext spanContext = currentSpan.getSpanContext();

long traceIdLong = 0L;
if (spanContext.isValid()) {
traceIdLong = parseTraceIdAsLong(spanContext.getTraceId());
if (traceStatement.isTraceOn()) {
LOG.info("TRACE ON (no-op): active OTel trace ID = {}", spanContext.getTraceId());
} else {
LOG.info("TRACE OFF (no-op): active OTel trace ID = {}", spanContext.getTraceId());
}
}

// Return the trace ID to the client for backward compat.
// Returns 0 if no active span exists.
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ParseNodeFactory factory = new ParseNodeFactory();
LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId());
LiteralParseNode literal = factory.literal(traceIdLong);
LiteralExpression expression =
LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS);
expression.evaluate(null, ptr);
Expand All @@ -290,11 +322,18 @@ public Tuple next() throws SQLException {
return new ResultTuple(Result.create(cells));
}

private void closeTraceScope(final PhoenixConnection conn) {
if (conn.getTraceScope() != null) {
conn.getTraceScope().close();
conn.setTraceScope(null);
/**
 * Parse the last 16 hex characters of an OTel trace ID as an unsigned long. OTel trace IDs
 * are 32-character hex strings (128 bits). We take the lower 64 bits (the trailing 16 hex
 * chars) for backward compatibility with the old HTrace long trace IDs.
 *
 * @param traceId 32-char lowercase hex OTel trace ID; may be null or short, in which case 0
 *                is returned
 * @return the lower 64 bits of the trace ID as a long, or 0 if the input is null/too short
 */
private long parseTraceIdAsLong(String traceId) {
if (traceId == null || traceId.length() < 16) {
return 0L;
}
// Take the last 16 hex chars (lower 64 bits); parse as unsigned so a leading hex digit
// >= 8 does not throw (the resulting long may be negative, matching HTrace semantics).
String lower64 = traceId.substring(traceId.length() - 16);
return Long.parseUnsignedLong(lower64, 16);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
*/
package org.apache.phoenix.execute;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
Expand All @@ -33,7 +35,6 @@
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.ExplainPlanAttributes;
Expand Down Expand Up @@ -72,8 +73,8 @@
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.trace.PhoenixTracing;
import org.apache.phoenix.trace.TracingIterator;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.LogUtil;
Expand Down Expand Up @@ -364,10 +365,11 @@ public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> c
}

// wrap the iterator so we start/end tracing as we expect
if (Tracing.isTracing()) {
TraceScope scope = Tracing.startNewSpan(context.getConnection(),
"Creating basic query for " + getPlanSteps(iterator));
if (scope.getSpan() != null) return new TracingIterator(scope, iterator);
if (PhoenixTracing.isRecording()) {
Span span = PhoenixTracing
.createSpan("phoenix.query.execute." + context.getCurrentTable().getTable().getName());
Scope scope = span.makeCurrent();
return new TracingIterator(span, scope, iterator);
}
return iterator;
}
Expand Down
Loading