diff --git a/bin/hadoop-metrics2-hbase.properties b/bin/hadoop-metrics2-hbase.properties index bafd44492f0..ef60aae1607 100644 --- a/bin/hadoop-metrics2-hbase.properties +++ b/bin/hadoop-metrics2-hbase.properties @@ -20,6 +20,12 @@ # Configuration for the metrics2 system for the HBase RegionServers # to enable phoenix trace collection on the HBase servers. # +# NOTE: The legacy PhoenixMetricsSink has been removed as part of the +# migration from HTrace to OpenTelemetry. Trace export is now handled +# by the OpenTelemetry Java Agent. Configure the agent via environment +# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to +# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend. +# # See hadoop-metrics2-phoenix.properties for how these configurations # are utilized. # @@ -28,9 +34,7 @@ # properties should be added to the file of the same name on # the HBase classpath (likely in the HBase conf/ folder) -# ensure that we receive traces on the server -hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink -# Tell the sink where to write the metrics -hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter -# Only handle traces with a context of "tracing" -hbase.sink.tracing.context=tracing +# Legacy PhoenixMetricsSink configuration (removed - use OpenTelemetry agent instead): +# hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink +# hbase.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter +# hbase.sink.tracing.context=tracing diff --git a/bin/hadoop-metrics2-phoenix.properties b/bin/hadoop-metrics2-phoenix.properties index f8c72231765..3d92dbb8e6d 100644 --- a/bin/hadoop-metrics2-phoenix.properties +++ b/bin/hadoop-metrics2-phoenix.properties @@ -37,21 +37,6 @@ # not zero-length). It is only there to differentiate the properties that are stored for # objects of the same type (e.g. differentiating between two phoenix.sink objects). 
# -#You could the following lines in your config -# -# phoenix.sink.thingA.class=com.your-company.SpecialSink -# phoenix.sink.thingA.option1=value1 -# -# and also -# -# phoenix.sink.thingB.class=org.apache.phoenix.trace.PhoenixMetricsSink -# phoenix.sink.thingB.doGoodStuff=true -# -# which will create both SpecialSink and PhoenixMetricsSink and register them -# as a MetricsSink, but Special sink will only see option1=value1 in its -# configuration, which similarly, the instantiated PhoenixMetricsSink will -# only see doGoodStuff=true in its configuration -# # See javadoc of package-info.java for org.apache.hadoop.metrics2 for detail # Uncomment to NOT start MBeans @@ -60,11 +45,15 @@ # Sample from all the sources every 10 seconds *.period=10 -# Write Traces to Phoenix +# Write Traces to Phoenix (LEGACY - removed) ########################## -# ensure that we receive traces on the server -phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink -# Tell the sink where to write the metrics -phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter -# Only handle traces with a context of "tracing" -phoenix.sink.tracing.context=tracing +# NOTE: The legacy PhoenixMetricsSink has been removed as part of the +# migration from HTrace to OpenTelemetry. Trace export is now handled +# by the OpenTelemetry Java Agent. Configure the agent via environment +# variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT) to export traces to +# Jaeger, Tempo, Zipkin, or any OTLP-compatible backend. 
+# +# Legacy configuration (removed - use OpenTelemetry agent instead): +# phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink +# phoenix.sink.tracing.writer-class=org.apache.phoenix.trace.PhoenixTableMetricsWriter +# phoenix.sink.tracing.context=tracing diff --git a/phoenix-client-parent/pom.xml b/phoenix-client-parent/pom.xml index 47b62405c36..ade3d57d579 100644 --- a/phoenix-client-parent/pom.xml +++ b/phoenix-client-parent/pom.xml @@ -137,8 +137,6 @@ ${shaded.package}.org. org/apache/hadoop/** - - org/apache/htrace/** org/slf4j/** org/apache/commons/logging/** org/apache/log4j/** @@ -183,6 +181,9 @@ io/skip/checksum/errors io/sort/* io/serializations + + io/opentelemetry/** diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml index 6c6cea359e6..65952b47c73 100644 --- a/phoenix-core-client/pom.xml +++ b/phoenix-core-client/pom.xml @@ -194,10 +194,6 @@ com.google.protobuf protobuf-java - - org.apache.htrace - htrace-core - org.slf4j slf4j-api @@ -250,6 +246,23 @@ org.hdrhistogram HdrHistogram + + + + io.opentelemetry + opentelemetry-api + provided + + + + io.opentelemetry + opentelemetry-context + provided + diff --git a/phoenix-core-client/src/main/antlr3/PhoenixSQL.g b/phoenix-core-client/src/main/antlr3/PhoenixSQL.g index 945d981f264..c9f06087570 100644 --- a/phoenix-core-client/src/main/antlr3/PhoenixSQL.g +++ b/phoenix-core-client/src/main/antlr3/PhoenixSQL.g @@ -225,7 +225,7 @@ import org.apache.phoenix.schema.types.PUnsignedTime; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.parse.LikeParseNode.LikeType; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.parse.AddJarsStatement; import org.apache.phoenix.parse.ExplainType; } @@ -724,7 +724,7 @@ alter_index_node returns [AlterIndexStatement ret] // Parse a trace statement. 
trace_node returns [TraceStatement ret] : TRACE ((flag = ON ( WITH SAMPLING s = sampling_rate)?) | flag = OFF) - {ret = factory.trace(Tracing.isTraceOn(flag.getText()), s == null ? Tracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());} + {ret = factory.trace(PhoenixTracing.isTraceOn(flag.getText()), s == null ? PhoenixTracing.isTraceOn(flag.getText()) ? 1.0 : 0.0 : (((BigDecimal)s.getValue())).doubleValue());} ; // Parse a create function statement. diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java index 32d309ab7b5..4265a56694d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/compile/TraceQueryPlan.java @@ -17,6 +17,8 @@ */ package org.apache.phoenix.compile; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; import java.sql.ParameterMetaData; import java.sql.SQLException; import java.util.ArrayList; @@ -28,8 +30,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.htrace.Sampler; -import org.apache.htrace.TraceScope; import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder; import org.apache.phoenix.compile.GroupByCompiler.GroupBy; import org.apache.phoenix.compile.OrderByCompiler.OrderBy; @@ -41,7 +41,6 @@ import org.apache.phoenix.iterate.DefaultParallelScanGrouper; import org.apache.phoenix.iterate.ParallelScanGrouper; import org.apache.phoenix.iterate.ResultIterator; -import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.jdbc.PhoenixStatement; import org.apache.phoenix.jdbc.PhoenixStatement.Operation; import org.apache.phoenix.metrics.MetricInfo; @@ -61,14 +60,39 @@ import 
org.apache.phoenix.schema.tuple.ResultTuple; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PLong; -import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.PhoenixKeyValueUtil; import org.apache.phoenix.util.SizedUtil; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Query plan for the {@code TRACE ON} / {@code TRACE OFF} SQL commands. + *

+ * Deprecated: The TRACE ON/OFF SQL mechanism is a legacy anti-pattern from the HTrace era. + * With OpenTelemetry, tracing is always-on and controlled by sampling at the infrastructure level + * (via {@code OTEL_TRACES_SAMPLER}), not per-connection via SQL commands. Users should use the + * OpenTelemetry Java Agent for automatic tracing instead. + *

+ *

+ * For backward compatibility, {@code TRACE ON} is now a no-op that returns the current trace ID if + * an active OTel span exists (e.g., from the Java Agent), or 0 if no span is active. + * {@code TRACE OFF} is also a no-op that returns 0. No spans are created or stored on the + * connection. + *

+ * @deprecated Use the OpenTelemetry Java Agent for automatic tracing. TRACE ON/OFF are no-ops. + */ +@Deprecated public class TraceQueryPlan implements QueryPlan { + private static final Logger LOG = LoggerFactory.getLogger(TraceQueryPlan.class); + + /** + * Log the deprecation warning at most once per JVM to avoid log spam. + */ + private static volatile boolean deprecationWarningLogged = false; + private TraceStatement traceStatement = null; private PhoenixStatement stmt = null; private StatementContext context = null; @@ -123,11 +147,8 @@ public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throw @Override public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException { - final PhoenixConnection conn = stmt.getConnection(); - if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) { - return ResultIterator.EMPTY_ITERATOR; - } - return new TraceQueryResultIterator(conn); + logDeprecationWarning(); + return new TraceQueryResultIterator(); } @Override @@ -240,13 +261,22 @@ public boolean isApplicable() { return true; } - private class TraceQueryResultIterator implements ResultIterator { - - private final PhoenixConnection conn; - - public TraceQueryResultIterator(PhoenixConnection conn) { - this.conn = conn; + private static void logDeprecationWarning() { + if (!deprecationWarningLogged) { + deprecationWarningLogged = true; + LOG.warn("TRACE ON/OFF SQL commands are deprecated and are " + + "now no-ops. Tracing is automatically handled by the " + + "OpenTelemetry Java Agent. Configure sampling via " + + "OTEL_TRACES_SAMPLER environment variable. " + + "See https://phoenix.apache.org/tracing.html " + "for details."); } + } + + /** + * Result iterator that returns the current OTel trace ID (if any active span exists) without + * creating or managing any spans. This is a backward-compatible no-op. 
+ */ + private class TraceQueryResultIterator implements ResultIterator { @Override public void close() throws SQLException { @@ -254,30 +284,32 @@ public void close() throws SQLException { @Override public Tuple next() throws SQLException { - if (!first) return null; - TraceScope traceScope = conn.getTraceScope(); - if (traceStatement.isTraceOn()) { - conn.setSampler(Tracing.getConfiguredSampler(traceStatement)); - if (conn.getSampler() == Sampler.NEVER) { - closeTraceScope(conn); - } - if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) { - traceScope = Tracing.startNewSpan(conn, "Enabling trace"); - if (traceScope.getSpan() != null) { - conn.setTraceScope(traceScope); - } else { - closeTraceScope(conn); - } - } - } else { - closeTraceScope(conn); - conn.setSampler(Sampler.NEVER); + if (!first) { + return null; } - if (traceScope == null || traceScope.getSpan() == null) return null; first = false; + + // Read the current span from OTel context (e.g., set by + // the Java Agent). We never create or store spans — just + // observe what's already there. + Span currentSpan = Span.current(); + SpanContext spanContext = currentSpan.getSpanContext(); + + long traceIdLong = 0L; + if (spanContext.isValid()) { + traceIdLong = parseTraceIdAsLong(spanContext.getTraceId()); + if (traceStatement.isTraceOn()) { + LOG.info("TRACE ON (no-op): active OTel trace ID = {}", spanContext.getTraceId()); + } else { + LOG.info("TRACE OFF (no-op): active OTel trace ID = {}", spanContext.getTraceId()); + } + } + + // Return the trace ID to the client for backward compat. + // Returns 0 if no active span exists. 
ImmutableBytesWritable ptr = new ImmutableBytesWritable(); ParseNodeFactory factory = new ParseNodeFactory(); - LiteralParseNode literal = factory.literal(traceScope.getSpan().getTraceId()); + LiteralParseNode literal = factory.literal(traceIdLong); LiteralExpression expression = LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE, Determinism.ALWAYS); expression.evaluate(null, ptr); @@ -290,11 +322,18 @@ public Tuple next() throws SQLException { return new ResultTuple(Result.create(cells)); } - private void closeTraceScope(final PhoenixConnection conn) { - if (conn.getTraceScope() != null) { - conn.getTraceScope().close(); - conn.setTraceScope(null); + /** + * Parse the last 16 hex characters of an OTel trace ID as a long. OTel trace IDs are + * 32-character hex strings (128 bits). We take the lower 64 bits for backward compatibility + * with the old HTrace long trace IDs. + */ + private long parseTraceIdAsLong(String traceId) { + if (traceId == null || traceId.length() < 16) { + return 0L; } + // Take the last 16 hex chars (lower 64 bits) + String lower64 = traceId.substring(traceId.length() - 16); + return Long.parseUnsignedLong(lower64, 16); } @Override diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java index e0dbc5afad9..5ba4b8a309d 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java @@ -17,6 +17,8 @@ */ package org.apache.phoenix.execute; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.IOException; @@ -33,7 +35,6 @@ import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; -import org.apache.htrace.TraceScope; 
import org.apache.phoenix.cache.ServerCacheClient.ServerCache; import org.apache.phoenix.compile.ExplainPlan; import org.apache.phoenix.compile.ExplainPlanAttributes; @@ -72,8 +73,8 @@ import org.apache.phoenix.schema.PTable.IndexType; import org.apache.phoenix.schema.PTableType; import org.apache.phoenix.schema.TableRef; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.trace.TracingIterator; -import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.IndexUtil; import org.apache.phoenix.util.LogUtil; @@ -364,10 +365,11 @@ public final ResultIterator iterator(final Map c } // wrap the iterator so we start/end tracing as we expect - if (Tracing.isTracing()) { - TraceScope scope = Tracing.startNewSpan(context.getConnection(), - "Creating basic query for " + getPlanSteps(iterator)); - if (scope.getSpan() != null) return new TracingIterator(scope, iterator); + if (PhoenixTracing.isRecording()) { + Span span = PhoenixTracing + .createSpan("phoenix.query.execute." 
+ context.getCurrentTable().getTable().getName()); + Scope scope = span.makeCurrent(); + return new TracingIterator(span, scope, iterator); } return iterator; } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java index 52517a884df..0dc971001c0 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/execute/MutationState.java @@ -41,6 +41,9 @@ import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB; import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.context.Scope; import java.io.IOException; import java.sql.SQLException; import java.sql.Timestamp; @@ -68,8 +71,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.htrace.Span; -import org.apache.htrace.TraceScope; import org.apache.phoenix.cache.ServerCacheClient.ServerCache; import org.apache.phoenix.compile.MutationPlan; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; @@ -118,7 +119,7 @@ import org.apache.phoenix.schema.types.PInteger; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PTimestamp; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.transaction.PhoenixTransactionContext.PhoenixVisibilityLevel; import org.apache.phoenix.transaction.TransactionFactory; @@ -1407,8 +1408,9 @@ private void sendBatch(Map commitBatch, long[] Map> physicalTableMutationMap = Maps.newLinkedHashMap(); // 
add tracing for this operation - try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) { - Span span = trace.getSpan(); + Span span = PhoenixTracing.createSpan("phoenix.mutation.commit"); + try (Scope ignored = span.makeCurrent()) { + span.setAttribute("phoenix.mutation.tables", (long) commitBatch.size()); ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(); for (Map.Entry entry : commitBatch.entrySet()) { // at this point we are going through mutations for each table @@ -1484,6 +1486,15 @@ private void sendBatch(Map commitBatch, long[] "Ignoring exception that happened during setting index verified value to verified=TRUE ", ex); } + span.setStatus(StatusCode.OK); + } catch (Throwable t) { + PhoenixTracing.setError(span, t); + if (t instanceof SQLException) { + throw (SQLException) t; + } + throw new SQLException(t); + } finally { + span.end(); } } @@ -1503,7 +1514,7 @@ private void sendMutations(Iterator>> mutationsI // create a span per target table // TODO maybe we can be smarter about the table name to string here? Span child = - Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName)); + PhoenixTracing.createSpan("phoenix.mutation.batch.write." 
+ Bytes.toString(htableName)); int retryCount = 0; boolean shouldRetry = false; @@ -1560,7 +1571,7 @@ private void sendMutations(Iterator>> mutationsI GLOBAL_MUTATION_BATCH_SIZE.update(numMutations); totalMutationBytesObject = calculateMutationSize(mutationList, true); - child.addTimelineAnnotation("Attempt " + retryCount); + child.addEvent("Attempt " + retryCount); Iterator> itrListMutation = mutationBatchList.iterator(); while (itrListMutation.hasNext()) { final List mutationBatch = itrListMutation.next(); @@ -1647,7 +1658,7 @@ private IOException updateTableRegionCacheIfNecessary(IOException ioe) { if (LOGGER.isDebugEnabled()) LOGGER.debug( "Sent batch of " + mutationBatch.size() + " for " + Bytes.toString(htableName)); } - child.stop(); + child.end(); shouldRetry = false; numFailedMutations = 0; @@ -1682,9 +1693,9 @@ private IOException updateTableRegionCacheIfNecessary(IOException ioe) { connection.getQueryServices().clearTableRegionCache(TableName.valueOf(htableName)); // add a new child span as this one failed - child.addTimelineAnnotation(msg); - child.stop(); - child = Tracing.child(span, "Failed batch, attempting retry"); + child.addEvent(msg); + child.end(); + child = PhoenixTracing.createSpan("phoenix.mutation.batch.retry"); continue; } else diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java index 4d30d09722e..46ea9c0efe8 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java @@ -36,7 +36,7 @@ import org.apache.phoenix.monitoring.ReadMetricQueue; import org.apache.phoenix.monitoring.ScanMetricsHolder; import org.apache.phoenix.monitoring.TaskExecutionMetricsHolder; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import 
org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.ScanUtil; @@ -127,7 +127,7 @@ protected void submitWork(final List> nestedScans, scan, scanMetricsHolder, renewLeaseThreshold, plan, scanGrouper, caches, maxQueryEndTime); context.getConnection().addIteratorForLeaseRenewal(tableResultItr); Future future = - executor.submit(Tracing.wrap(new JobCallable() { + executor.submit(PhoenixTracing.wrap(new JobCallable() { @Override public PeekingResultIterator call() throws Exception { @@ -171,7 +171,7 @@ public Object getJobId() { public TaskExecutionMetricsHolder getTaskExecutionMetric() { return taskMetrics; } - }, "Parallel scanner for table: " + tableRef.getTable().getPhysicalName().getString())); + })); // Add our future in the right place so that we can concatenate the // results of the inner futures versus merge sorting across all of them. nestedFutures.get(scanLocation.getOuterListIndex()).set(scanLocation.getInnerListIndex(), diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java index 8b23073396f..6e958f02791 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/SerialIterators.java @@ -41,7 +41,7 @@ import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.tuple.Tuple; import org.apache.phoenix.schema.types.PInteger; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.LogUtil; import org.apache.phoenix.util.QueryUtil; @@ -98,7 +98,7 @@ protected void submitWork(final List> nestedScans, } final List finalScans = flattenedScans; Future future = - executor.submit(Tracing.wrap(new JobCallable() { + 
executor.submit(PhoenixTracing.wrap(new JobCallable() { @Override public PeekingResultIterator call() throws Exception { PeekingResultIterator itr = new SerialIterator(finalScans, tableName, @@ -120,7 +120,7 @@ public Object getJobId() { public TaskExecutionMetricsHolder getTaskExecutionMetric() { return taskMetrics; } - }, "Serial scanner for table: " + tableRef.getTable().getPhysicalName().getString())); + })); // Add our singleton Future which will execute serially nestedFutures.add(Collections.singletonList( new Pair>(flattenedScans.get(0), future))); diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java index 44ab9c99ae4..265cbc31c9c 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java @@ -65,8 +65,6 @@ import javax.annotation.Nullable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Consistency; -import org.apache.htrace.Sampler; -import org.apache.htrace.TraceScope; import org.apache.phoenix.call.CallRunner; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import org.apache.phoenix.exception.FailoverSQLException; @@ -120,7 +118,7 @@ import org.apache.phoenix.schema.types.PUnsignedTime; import org.apache.phoenix.schema.types.PUnsignedTimestamp; import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.DateUtil; @@ -168,10 +166,8 @@ public class PhoenixConnection private final String timePattern; private final String timestampPattern; private int statementExecutionCounter; - private TraceScope traceScope = null; private volatile boolean isClosed = 
false; private volatile boolean isClosing = false; - private Sampler sampler; private boolean readOnly = false; private Consistency consistency = Consistency.STRONG; private Map customTracingAnnotations = emptyMap(); @@ -201,7 +197,8 @@ public class PhoenixConnection private ConnectionActivityLogger connectionActivityLogger = ConnectionActivityLogger.NO_OP_LOGGER; static { - Tracing.addTraceMetricsSource(); + // OpenTelemetry tracing is initialized via the Java Agent at runtime. + // No explicit initialization needed here. CONNECTION_PROPERTIES = PhoenixRuntime.getConnectionProperties(); } @@ -218,7 +215,6 @@ public PhoenixConnection(PhoenixConnection connection, boolean isDescRowKeyOrder connection.buildingIndex, true); this.isAutoCommit = connection.isAutoCommit; this.isAutoFlush = connection.isAutoFlush; - this.sampler = connection.sampler; this.statementExecutionCounter = connection.statementExecutionCounter; } @@ -243,7 +239,6 @@ public PhoenixConnection(PhoenixConnection connection, Properties props) throws connection.buildingIndex, true); this.isAutoCommit = connection.isAutoCommit; this.isAutoFlush = connection.isAutoFlush; - this.sampler = connection.sampler; this.statementExecutionCounter = connection.statementExecutionCounter; } @@ -373,7 +368,6 @@ public ReadOnlyProps getProps() { this.services.addConnection(this); // setup tracing, if its enabled - this.sampler = Tracing.getConfiguredSampler(this); this.customTracingAnnotations = getImmutableCustomTracingAnnotations(); this.scannerQueue = new LinkedBlockingQueue<>(); this.tableResultIteratorFactory = new DefaultTableResultIteratorFactory(); @@ -477,14 +471,6 @@ public int getChildConnectionsCount() { return childConnections.size(); } - public Sampler getSampler() { - return this.sampler; - } - - public void setSampler(Sampler sampler) throws SQLException { - this.sampler = sampler; - } - public Map getCustomTracingAnnotations() { return customTracingAnnotations; } @@ -831,9 +817,6 @@ synchronized 
public void close() throws SQLException { if (childConnections != null) { SQLCloseables.closeAllQuietly(childConnections); } - if (traceScope != null) { - traceScope.close(); - } } finally { services.removeConnection(this); } @@ -875,7 +858,7 @@ public Void call() throws SQLException { } return null; } - }, Tracing.withTracing(this, "committing mutations")); + }, PhoenixTracing.withTracing("phoenix.connection.commit")); statementExecutionCounter = 0; } @@ -1156,7 +1139,7 @@ public Void call() throws SQLException { mutationState.rollback(); return null; } - }, Tracing.withTracing(this, "rolling back")); + }, PhoenixTracing.withTracing("phoenix.connection.rollback")); statementExecutionCounter = 0; } @@ -1362,14 +1345,6 @@ public void incrementStatementExecutionCounter() { } } - public TraceScope getTraceScope() { - return traceScope; - } - - public void setTraceScope(TraceScope traceScope) { - this.traceScope = traceScope; - } - @Override public Map> getMutationMetrics() { return mutationState.getMutationMetricQueue().aggregate(); diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java index bd0dc325656..68bd90189b6 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java @@ -224,7 +224,7 @@ import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PLong; import org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.trace.util.Tracing; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.CDCUtil; import org.apache.phoenix.util.ClientUtil; @@ -781,7 +781,8 @@ public Pair call() throws SQLException { } } - }, PhoenixContextExecutor.inContext(), Tracing.withTracing(connection, this.toString())); + }, 
PhoenixContextExecutor.inContext(), + PhoenixTracing.withTracing("phoenix.statement.execute")); } catch (Exception e) { if (queryLogger.isAuditLoggingEnabled()) { queryLogger.log(QueryLogInfo.TABLE_NAME_I, getTargetForAudit(stmt)); @@ -1911,6 +1912,10 @@ public MutationState execute() throws SQLException { } } + /** + * @deprecated TRACE ON/OFF are deprecated no-ops. Use the OpenTelemetry Java Agent instead. + */ + @Deprecated private static class ExecutableTraceStatement extends TraceStatement implements CompilableStatement { diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java index fa49542e50e..3d578b306bb 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/parse/TraceStatement.java @@ -19,6 +19,11 @@ import org.apache.phoenix.jdbc.PhoenixStatement.Operation; +/** + * Parse node for the {@code TRACE ON} / {@code TRACE OFF} SQL commands. + * @deprecated Use the OpenTelemetry Java Agent for automatic tracing. TRACE ON/OFF are no-ops. + */ +@Deprecated public class TraceStatement implements BindableStatement { private final boolean traceOn; diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java index 630a2d4f210..be0db2af52a 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServices.java @@ -222,17 +222,27 @@ public interface QueryServices extends SQLCloseable { public static final String METADATA_WRITE_RETRIES_NUMBER = "phoenix.metadata.rpc.retries.number"; public static final String METADATA_WRITE_RETRY_PAUSE = "phoenix.metadata.rpc.pause"; - // Config parameters for for configuring tracing + // Config parameters for tracing. 
+ // NOTE: Most of these are deprecated — tracing is now handled by OpenTelemetry. + // The OTel Java Agent is configured via environment variables (OTEL_EXPORTER_OTLP_ENDPOINT, etc.) + // rather than Phoenix configuration properties. + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_FREQ_ATTRIB = "phoenix.trace.frequency"; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_PAGE_SIZE_ATTRIB = "phoenix.trace.read.pagesize"; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_PROBABILITY_THRESHOLD_ATTRIB = "phoenix.trace.probability.threshold"; public static final String TRACING_STATS_TABLE_NAME_ATTRIB = "phoenix.trace.statsTableName"; public static final String TRACING_CUSTOM_ANNOTATION_ATTRIB_PREFIX = "phoenix.trace.custom.annotation."; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_ENABLED = "phoenix.trace.enabled"; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_BATCH_SIZE = "phoenix.trace.batchSize"; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_THREAD_POOL_SIZE = "phoenix.trace.threadPoolSize"; + @Deprecated /** Use OpenTelemetry agent configuration instead */ public static final String TRACING_TRACE_BUFFER_SIZE = "phoenix.trace.traceBufferSize"; public static final String USE_REVERSE_SCAN_ATTRIB = "phoenix.query.useReverseScan"; diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java index ba344af5b14..5c70cb8698f 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java @@ -140,7 +140,6 @@ 
import org.apache.phoenix.schema.PTable.ImmutableStorageScheme; import org.apache.phoenix.schema.PTable.QualifierEncodingScheme; import org.apache.phoenix.schema.PTableRefFactory; -import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.ReadOnlyProps; @@ -280,7 +279,7 @@ public class QueryServicesOptions { * Configuration key to overwrite the tablename that should be used as the target table */ public static final String DEFAULT_TRACING_STATS_TABLE_NAME = "SYSTEM.TRACING_STATS"; - public static final String DEFAULT_TRACING_FREQ = Tracing.Frequency.NEVER.getKey(); + public static final String DEFAULT_TRACING_FREQ = "never"; public static final double DEFAULT_TRACING_PROBABILITY_THRESHOLD = 0.05; public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 15 * 60000; // 15min diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java index 0da85f851dc..5008e28f606 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/schema/MetaDataClient.java @@ -4870,9 +4870,9 @@ public MutationState addColumn(PTable table, List origColumnDefs, /** * To check if TTL is defined at any of the child below we are checking it at * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl#mutateColumn(List, ColumnMutator, int, PTable, PTable, boolean)} - * level where in function - * {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl# validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], byte[], byte[], List, int)} - * we are already traversing through allDescendantViews. 
+ * level where in function {@link org.apache.phoenix.coprocessor.MetaDataEndpointImpl# + * validateIfMutationAllowedOnParent(PTable, List, PTableType, long, byte[], byte[], + * byte[], List, int)} we are already traversing through allDescendantViews. */ } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java deleted file mode 100644 index da88fd6e7c9..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.*; - -import org.apache.hadoop.metrics2.MetricsInfo; - -import org.apache.phoenix.thirdparty.com.google.common.base.MoreObjects; -import org.apache.phoenix.thirdparty.com.google.common.base.Objects; - -/** - * Making implementing metric info a little easier - *

- * Just a copy of the same from Hadoop, but exposed for usage. - */ -public class MetricsInfoImpl implements MetricsInfo { - private final String name, description; - - MetricsInfoImpl(String name, String description) { - this.name = checkNotNull(name, "name"); - this.description = checkNotNull(description, "description"); - } - - @Override - public String name() { - return name; - } - - @Override - public String description() { - return description; - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof MetricsInfo) { - MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hashCode(name, description); - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this).add("name", name).add("description", description) - .toString(); - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java deleted file mode 100644 index 1239514f702..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION; -import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION; -import static org.apache.phoenix.metrics.MetricInfo.END; -import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME; -import static org.apache.phoenix.metrics.MetricInfo.PARENT; -import static org.apache.phoenix.metrics.MetricInfo.SPAN; -import static org.apache.phoenix.metrics.MetricInfo.START; -import static org.apache.phoenix.metrics.MetricInfo.TAG; -import static org.apache.phoenix.metrics.MetricInfo.TRACE; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import org.apache.commons.configuration2.SubsetConfiguration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsRecord; -import org.apache.hadoop.metrics2.MetricsSink; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.phoenix.compile.MutationPlan; -import org.apache.phoenix.execute.MutationState; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.metrics.MetricInfo; -import org.apache.phoenix.metrics.Metrics; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import 
org.apache.phoenix.schema.TableNotFoundException; -import org.apache.phoenix.trace.util.Tracing; -import org.apache.phoenix.util.QueryUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; - -/** - * Write the metrics to a phoenix table. Generally, this class is instantiated via hadoop-metrics2 - * property files. Specifically, you would create this class by adding the following to by This - * would actually be set as: - * [prefix].sink.[some instance name].class=org.apache.phoenix.trace.PhoenixMetricsSink - * , where prefix is either: - *

    - *
  1. "phoenix", for the client
  2. - *
  3. "hbase", for the server
  4. - *
- * and some instance name is just any unique name, so properties can be differentiated if - * there are multiple sinks of the same type created - */ -public class PhoenixMetricsSink implements MetricsSink { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixMetricsSink.class); - - private static final String VARIABLE_VALUE = "?"; - - private static final Joiner COLUMN_JOIN = Joiner.on("."); - static final String TAG_FAMILY = "tags"; - /** - * Count of the number of tags we are storing for this row - */ - static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); - - static final String ANNOTATION_FAMILY = "annotations"; - static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); - - /** - * Join strings on a comma - */ - private static final Joiner COMMAS = Joiner.on(','); - - private Connection conn; - - private String table; - - public PhoenixMetricsSink() { - LOGGER.info("Writing tracing metrics to phoenix table"); - - } - - @Override - public void init(SubsetConfiguration config) { - Metrics.markSinkInitialized(); - LOGGER.info("Phoenix tracing writer started"); - } - - /** - * Initialize this only when we need it - */ - private void lazyInitialize() { - synchronized (this) { - if (this.conn != null) { - return; - } - try { - // create the phoenix connection - Properties props = new Properties(); - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, Tracing.Frequency.NEVER.getKey()); - org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create(); - Connection conn = QueryUtil.getConnectionOnServer(props, conf); - // enable bulk loading when we have enough data - conn.setAutoCommit(true); - - String tableName = conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB, - QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME); - - initializeInternal(conn, tableName); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - - private void initializeInternal(Connection conn, 
String tableName) throws SQLException { - this.conn = conn; - // ensure that the target table already exists - if (!traceTableExists(conn, tableName)) { - createTable(conn, tableName); - } - this.table = tableName; - } - - private boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { - try { - conn.unwrap(PhoenixConnection.class).getTable(traceTableName); - return true; - } catch (TableNotFoundException e) { - return false; - } - } - - /** - * Used for TESTING ONLY Initialize the connection and setup the table to use the - * {@link org.apache.phoenix.query.QueryServicesOptions#DEFAULT_TRACING_STATS_TABLE_NAME} - * @param conn to store for upserts and to create the table (if necessary) - * @param tableName TODO - * @throws SQLException if any phoenix operation fails - */ - @VisibleForTesting - public void initForTesting(Connection conn, String tableName) throws SQLException { - initializeInternal(conn, tableName); - } - - /** - * Create a stats table with the given name. Stores the name for use later when creating upsert - * statements - * @param conn connection to use when creating the table - * @param table name of the table to create - * @throws SQLException if any phoenix operations fails - */ - private void createTable(Connection conn, String table) throws SQLException { - // only primary-key columns can be marked non-null - String ddl = "create table if not exists " + table + "( " + TRACE.columnName - + " bigint not null, " + PARENT.columnName + " bigint not null, " + SPAN.columnName - + " bigint not null, " + DESCRIPTION.columnName + " varchar, " + START.columnName - + " bigint, " + END.columnName + " bigint, " + HOSTNAME.columnName + " varchar, " + TAG_COUNT - + " smallint, " + ANNOTATION_COUNT + " smallint" + " CONSTRAINT pk PRIMARY KEY (" - + TRACE.columnName + ", " + PARENT.columnName + ", " + SPAN.columnName + "))\n" + - // We have a config parameter that can be set so that tables are - // transactional by default. 
If that's set, we still don't want these system - // tables created as transactional tables, make these table non - // transactional - PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; - try (PreparedStatement stmt = conn.prepareStatement(ddl)) { - stmt.execute(); - } - } - - @Override - public void flush() { - try { - this.conn.commit(); - } catch (SQLException e) { - LOGGER.error("Failed to commit changes to table", e); - } - } - - /** - * Add a new metric record to be written. - */ - @Override - public void putMetrics(MetricsRecord record) { - // its not a tracing record, we are done. This could also be handled by filters, but safer - // to do it here, in case it gets misconfigured - if (!record.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { - return; - } - - // don't initialize until we actually have something to write - lazyInitialize(); - - String stmt = "UPSERT INTO " + table + " ("; - // drop it into the queue of things that should be written - List keys = new ArrayList(); - List values = new ArrayList(); - // we need to keep variable values in a separate set since they may have spaces, which - // causes the parser to barf. 
Instead, we need to add them after the statement is prepared - List variableValues = new ArrayList(record.tags().size()); - keys.add(TRACE.columnName); - values.add(Long.parseLong(record.name().substring(TracingUtils.METRIC_SOURCE_KEY.length()))); - - keys.add(DESCRIPTION.columnName); - values.add(VARIABLE_VALUE); - variableValues.add(record.description()); - - // add each of the metrics - for (AbstractMetric metric : record.metrics()) { - // name of the metric is also the column name to which we write - keys.add(MetricInfo.getColumnName(metric.name())); - values.add(metric.value()); - } - - // get the tags out so we can set them later (otherwise, need to be a single value) - int annotationCount = 0; - int tagCount = 0; - for (MetricsTag tag : record.tags()) { - if (tag.name().equals(ANNOTATION.traceName)) { - addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION, - annotationCount); - annotationCount++; - } else if (tag.name().equals(TAG.traceName)) { - addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount); - tagCount++; - } else if (tag.name().equals(HOSTNAME.traceName)) { - keys.add(HOSTNAME.columnName); - values.add(VARIABLE_VALUE); - variableValues.add(tag.value()); - } else if (tag.name().equals("Context")) { - // ignored - } else { - LOGGER.error("Got an unexpected tag: " + tag); - } - } - - // add the tag count, now that we know it - keys.add(TAG_COUNT); - // ignore the hostname in the tags, if we know it - values.add(tagCount); - - keys.add(ANNOTATION_COUNT); - values.add(annotationCount); - - // compile the statement together - stmt += COMMAS.join(keys); - stmt += ") VALUES (" + COMMAS.join(values) + ")"; - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Logging metrics to phoenix table via: " + stmt); - LOGGER.trace("With tags: " + variableValues); - } - try (PreparedStatement ps = conn.prepareStatement(stmt)) { - // add everything that wouldn't/may not parse - int index = 1; - for (String tag : 
variableValues) { - ps.setString(index++, tag); - } - // Not going through the standard route of using statement.execute() as that code path - // is blocked if the metadata hasn't been been upgraded to the new minor release. - MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); - MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); - MutationState newState = plan.execute(); - state.join(newState); - } catch (SQLException e) { - LOGGER.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, e); - } - } - - public static String getDynamicColumnName(String family, String column, int count) { - return COLUMN_JOIN.join(family, column) + count; - } - - private void addDynamicEntry(List keys, List values, List variableValues, - String family, MetricsTag tag, MetricInfo metric, int count) { - // <.dynColumn> - keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); - - // build the annotation value - String val = tag.description() + " - " + tag.value(); - values.add(VARIABLE_VALUE); - variableValues.add(val); - } - - @VisibleForTesting - public void clearForTesting() throws SQLException { - this.conn.rollback(); - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixTracing.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixTracing.java new file mode 100644 index 00000000000..ff9b23b1641 --- /dev/null +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/PhoenixTracing.java @@ -0,0 +1,334 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.trace; + +import io.opentelemetry.api.GlobalOpenTelemetry; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Context; +import io.opentelemetry.context.Scope; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.apache.phoenix.call.CallWrapper; + +import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; + +/** + * Central tracing facade for Apache Phoenix using OpenTelemetry, following the same pattern as + * HBase's {@code TraceUtil} (HBASE-22120). All methods are no-ops with zero overhead when no + * OpenTelemetry SDK is configured. + * @see PHOENIX-5215 + */ +public final class PhoenixTracing { + + private static final String INSTRUMENTATION_NAME = "org.apache.phoenix"; + + private PhoenixTracing() { + } + + /** + * Returns the global tracer for Phoenix. The tracer is obtained from {@link GlobalOpenTelemetry} + * on each call (the OTel SDK caches it internally, so there is no performance penalty). This + * avoids issues with eager initialization when the SDK has not been configured yet. + */ + public static Tracer getTracer() { + return GlobalOpenTelemetry.getTracer(INSTRUMENTATION_NAME); + } + + /** + * Create a {@link SpanKind#INTERNAL} span. This is the default for most Phoenix operations (query + * compilation, mutation processing, index maintenance, etc.). 
+ */ + public static Span createSpan(String name) { + return createSpan(name, SpanKind.INTERNAL); + } + + /** + * Create a span with the given {@code kind}. Note that OpenTelemetry expects at most one + * {@link SpanKind#CLIENT} span and one {@link SpanKind#SERVER} span per traced request, so use + * this with caution for kinds other than {@link SpanKind#INTERNAL}. + */ + private static Span createSpan(String name, SpanKind kind) { + return getTracer().spanBuilder(name).setSpanKind(kind).startSpan(); + } + + /** + * Create a {@link SpanKind#CLIENT} span. Use this for outgoing RPC calls from Phoenix client to + * HBase (e.g., scan requests, mutation commits). + */ + public static Span createClientSpan(String name) { + return createSpan(name, SpanKind.CLIENT); + } + + /** + * Create a {@link SpanKind#SERVER} span from a remote context. Use this for operations that + * originate from a remote call, such as coprocessor endpoints. The parent context is typically + * extracted from RPC headers. + */ + public static Span createRemoteSpan(String name, Context ctx) { + return getTracer().spanBuilder(name).setParent(ctx).setSpanKind(SpanKind.SERVER).startSpan(); + } + + /** + * Check if the current span is recording. This can be used to avoid expensive attribute + * computation when tracing is not active. + * @return {@code true} if the current span is recording, {@code false} if tracing is off or no + * SDK is configured. + */ + public static boolean isRecording() { + return Span.current().isRecording(); + } + + /** + * Record an exception on the given span and set its status to {@link StatusCode#ERROR}. + */ + public static void setError(Span span, Throwable error) { + span.recordException(error); + span.setStatus(StatusCode.ERROR); + } + + /** + * Trace the execution of a synchronous operation that may throw. Creates a span, runs the + * operation, and automatically handles success/error status and span lifecycle. + * + *
+   * PhoenixTracing.trace(() -> {
+   *   doSomeWork();
+   * }, "phoenix.operation.name");
+   * 
+ */ + public static void trace(ThrowingRunnable runnable, String spanName) + throws T { + trace(runnable, () -> createSpan(spanName)); + } + + /** + * Trace the execution of a synchronous operation that may throw, using a custom span supplier. + * This allows callers to customize the span (e.g., set attributes before starting). + */ + public static void trace(ThrowingRunnable runnable, + Supplier spanSupplier) throws T { + Span span = spanSupplier.get(); + try (Scope ignored = span.makeCurrent()) { + runnable.run(); + span.setStatus(StatusCode.OK); + } catch (Throwable e) { + setError(span, e); + throw e; + } finally { + span.end(); + } + } + + /** + * Trace the execution of a synchronous operation that returns a value and may throw. + * + *
+   * QueryPlan plan = PhoenixTracing.trace(() -> {
+   *   return compiler.compile(sql);
+   * }, "phoenix.query.compile");
+   * 
+ */ + public static R trace(ThrowingCallable callable, String spanName) + throws T { + return trace(callable, () -> createSpan(spanName)); + } + + /** + * Trace the execution of a synchronous operation that returns a value and may throw, using a + * custom span supplier. + */ + public static R trace(ThrowingCallable callable, + Supplier spanSupplier) throws T { + Span span = spanSupplier.get(); + try (Scope ignored = span.makeCurrent()) { + R result = callable.call(); + span.setStatus(StatusCode.OK); + return result; + } catch (Throwable e) { + setError(span, e); + throw e; + } finally { + span.end(); + } + } + + /** + * Trace an asynchronous operation that returns a {@link CompletableFuture}. The span is ended + * when the future completes (either successfully or with an error). + * + *
+   * CompletableFuture&lt;Result&gt; future = PhoenixTracing.tracedFuture(() -> {
+   *   return asyncOperation();
+   * }, "phoenix.async.operation");
+   * 
+ */ + public static CompletableFuture tracedFuture(Supplier> action, + String spanName) { + return tracedFuture(action, () -> createSpan(spanName)); + } + + /** + * Trace an asynchronous operation that returns a {@link CompletableFuture}, using a custom span + * supplier. + */ + public static CompletableFuture tracedFuture(Supplier> action, + Supplier spanSupplier) { + Span span = spanSupplier.get(); + try (Scope ignored = span.makeCurrent()) { + CompletableFuture future = action.get(); + endSpan(future, span); + return future; + } + } + + /** + * Wrap the provided {@link Runnable} in a traced {@link Runnable}. The returned runnable will + * create a span, execute the original runnable, and end the span. + */ + public static Runnable tracedRunnable(Runnable runnable, String spanName) { + return tracedRunnable(runnable, () -> createSpan(spanName)); + } + + /** + * Wrap the provided {@link Runnable} in a traced {@link Runnable}, using a custom span supplier. + */ + public static Runnable tracedRunnable(Runnable runnable, Supplier spanSupplier) { + return () -> { + Span span = spanSupplier.get(); + try (Scope ignored = span.makeCurrent()) { + runnable.run(); + span.setStatus(StatusCode.OK); + } finally { + span.end(); + } + }; + } + + /** + * Wrap a {@link Callable} with the current OpenTelemetry context. This ensures that the trace + * context is propagated when the callable is executed in a different thread (e.g., in a thread + * pool for parallel scans). + * + *
+   * Callable&lt;Result&gt; wrapped = PhoenixTracing.wrap(myCallable);
+   * executor.submit(wrapped); // trace context is preserved
+   * 
+ */ + public static Callable wrap(Callable callable) { + return Context.current().wrap(callable); + } + + /** + * Wrap a {@link Runnable} with the current OpenTelemetry context. This ensures that the trace + * context is propagated when the runnable is executed in a different thread. + */ + public static Runnable wrap(Runnable runnable) { + return Context.current().wrap(runnable); + } + + /** + * Finish the span when the given future completes. Sets {@link StatusCode#OK} on success, or + * records the exception and sets {@link StatusCode#ERROR} on failure. + */ + private static void endSpan(CompletableFuture future, Span span) { + future.whenComplete((resp, error) -> { + if (error != null) { + setError(span, error); + } else { + span.setStatus(StatusCode.OK); + } + span.end(); + }); + } + + /** + * Create a {@link CallWrapper} that wraps a call with an OpenTelemetry span. The span is started + * in {@code before()} and ended in {@code after()}. This is used with + * {@link org.apache.phoenix.call.CallRunner#run} to trace operations like commit and rollback. + */ + public static CallWrapper withTracing(String spanName) { + return new CallWrapper() { + private Span span; + private Scope scope; + + @Override + public void before() { + span = createSpan(spanName); + scope = span.makeCurrent(); + } + + @Override + public void after() { + // Note: after() is called from CallRunner's finally block, so we don't + // know if the operation succeeded or failed. We don't set StatusCode.OK + // here because the span may represent a failed operation. If the caller + // needs to record errors, they should do so on Span.current() before + // the exception propagates. + try { + if (scope != null) { + scope.close(); + } + } finally { + if (span != null) { + span.end(); + } + } + } + }; + } + + /** + * Parse a TRACE ON/OFF option string from the SQL grammar. 
+ * @param traceOption the option string, expected to be "ON" or "OFF" (case-insensitive) + * @return {@code true} if tracing should be enabled, {@code false} otherwise + * @throws IllegalArgumentException if the option is not recognized + */ + public static boolean isTraceOn(String traceOption) { + Preconditions.checkNotNull(traceOption, "traceOption must not be null"); + if (traceOption.equalsIgnoreCase("ON")) { + return true; + } + if (traceOption.equalsIgnoreCase("OFF")) { + return false; + } + throw new IllegalArgumentException("Unknown tracing option: " + traceOption); + } + + /** + * A {@link Runnable}-like interface that may throw a checked exception. + * @param the type of {@link Throwable} that can be thrown + */ + @FunctionalInterface + public interface ThrowingRunnable { + void run() throws T; + } + + /** + * A {@link Callable}-like interface that may throw a checked exception. + * @param the result type + * @param the type of {@link Throwable} that can be thrown + */ + @FunctionalInterface + public interface ThrowingCallable { + R call() throws T; + } +} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java deleted file mode 100644 index 0611ebd56bf..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceReader.java +++ /dev/null @@ -1,374 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import org.apache.htrace.Span; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.metrics.MetricInfo; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.util.LogUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.primitives.Longs; - -/** - * Read the traces written to phoenix tables by the {@link TraceWriter}. - */ -public class TraceReader { - - private static final Logger LOGGER = LoggerFactory.getLogger(TraceReader.class); - private final Joiner comma = Joiner.on(','); - private String knownColumns; - { - // the order here dictates the order we pull out the values below. For now, just keep them - // in sync - so we can be efficient pulling them off the results. 
- knownColumns = comma.join(MetricInfo.TRACE.columnName, MetricInfo.PARENT.columnName, - MetricInfo.SPAN.columnName, MetricInfo.DESCRIPTION.columnName, MetricInfo.START.columnName, - MetricInfo.END.columnName, MetricInfo.HOSTNAME.columnName, TraceWriter.TAG_COUNT, - TraceWriter.ANNOTATION_COUNT); - } - - private Connection conn; - private String table; - private int pageSize; - - public TraceReader(Connection conn, String tracingTableName) throws SQLException { - this.conn = conn; - this.table = tracingTableName; - String ps = conn.getClientInfo(QueryServices.TRACING_PAGE_SIZE_ATTRIB); - this.pageSize = - ps == null ? QueryServicesOptions.DEFAULT_TRACING_PAGE_SIZE : Integer.parseInt(ps); - } - - /** - * Read all the currently stored traces. - *

- * Be Careful! This could cause an OOME if there are a lot of traces. - * @param limit max number of traces to return. If -1, returns all known traces. - * @return the found traces - */ - public Collection readAll(int limit) throws SQLException { - Set traces = new HashSet(); - // read all the known columns from the table, sorting first by trace column (so the same - // trace - // goes together), and then by start time (so parent spans always appear before child spans) - String query = - "SELECT " + knownColumns + " FROM " + table + " ORDER BY " + MetricInfo.TRACE.columnName - + " DESC, " + MetricInfo.START.columnName + " ASC" + " LIMIT " + pageSize; - int resultCount = 0; - try (PreparedStatement stmt = conn.prepareStatement(query); - ResultSet results = stmt.executeQuery()) { - TraceHolder trace = null; - // the spans that are not the root span, but haven't seen their parent yet - List orphans = null; - while (results.next()) { - int index = 1; - long traceid = results.getLong(index++); - long parent = results.getLong(index++); - long span = results.getLong(index++); - String desc = results.getString(index++); - long start = results.getLong(index++); - long end = results.getLong(index++); - String host = results.getString(index++); - int tagCount = results.getInt(index++); - int annotationCount = results.getInt(index++); - // we have a new trace - if (trace == null || traceid != trace.traceid) { - // only increment if we are on a new trace, to ensure we get at least one - if (trace != null) { - resultCount++; - } - // we beyond the limit, so we stop - if (resultCount >= limit) { - break; - } - trace = new TraceHolder(); - // add the orphans, so we can track them later - orphans = new ArrayList(); - trace.orphans = orphans; - trace.traceid = traceid; - traces.add(trace); - } - - // search the spans to determine the if we have a known parent - SpanInfo parentSpan = null; - if (parent != Span.ROOT_SPAN_ID) { - // find the parent - for (SpanInfo p : trace.spans) { - 
if (p.id == parent) { - parentSpan = p; - break; - } - } - } - SpanInfo spanInfo = - new SpanInfo(parentSpan, parent, span, desc, start, end, host, tagCount, annotationCount); - // search the orphans to see if this is the parent id - - for (int i = 0; i < orphans.size(); i++) { - SpanInfo orphan = orphans.get(i); - // we found the parent for the orphan - if (orphan.parentId == span) { - // update the bi-directional relationship - orphan.parent = spanInfo; - spanInfo.children.add(orphan); - // / its no longer an orphan - LOGGER.trace(addCustomAnnotations("Found parent for span: " + span)); - orphans.remove(i--); - } - } - - if (parentSpan != null) { - // add this as a child to the parent span - parentSpan.children.add(spanInfo); - } else if (parent != Span.ROOT_SPAN_ID) { - // add the span to the orphan pile to check for the remaining spans we see - LOGGER.info(addCustomAnnotations("No parent span found for span: " + span - + " (root span id: " + Span.ROOT_SPAN_ID + ")")); - orphans.add(spanInfo); - } - - // add the span to the full known list - trace.spans.add(spanInfo); - - // go back and find the tags for the row - spanInfo.tags.addAll(getTags(traceid, parent, span, tagCount)); - - spanInfo.annotations.addAll(getAnnotations(traceid, parent, span, annotationCount)); - } - } - - return traces; - } - - private Collection getTags(long traceid, long parent, long span, int count) - throws SQLException { - return getDynamicCountColumns(traceid, parent, span, count, TraceWriter.TAG_FAMILY, - MetricInfo.TAG.columnName); - } - - private Collection getAnnotations(long traceid, long parent, long span, - int count) throws SQLException { - return getDynamicCountColumns(traceid, parent, span, count, TraceWriter.ANNOTATION_FAMILY, - MetricInfo.ANNOTATION.columnName); - } - - private Collection getDynamicCountColumns(long traceid, long parent, long span, - int count, String family, String columnName) throws SQLException { - if (count == 0) { - return Collections.emptyList(); - } 
- - // build the column strings, family.column - String[] parts = new String[count]; - for (int i = 0; i < count; i++) { - parts[i] = TraceWriter.getDynamicColumnName(family, columnName, i); - } - // join the columns together - String columns = comma.join(parts); - - // redo them and add "VARCHAR to the end, so we can specify the columns - for (int i = 0; i < count; i++) { - parts[i] = parts[i] + " VARCHAR"; - } - - String dynamicColumns = comma.join(parts); - String request = "SELECT " + columns + " from " + table + "(" + dynamicColumns + ") WHERE " - + MetricInfo.TRACE.columnName + "=" + traceid + " AND " + MetricInfo.PARENT.columnName + "=" - + parent + " AND " + MetricInfo.SPAN.columnName + "=" + span; - LOGGER.trace(addCustomAnnotations("Requesting columns with: " + request)); - ResultSet results = conn.createStatement().executeQuery(request); - List cols = new ArrayList(); - while (results.next()) { - for (int index = 1; index <= count; index++) { - cols.add(results.getString(index)); - } - } - if (cols.size() < count) { - LOGGER.error(addCustomAnnotations("Missing tags! 
Expected " + count + ", but only got " - + cols.size() + " tags from rquest " + request)); - } - return cols; - } - - private String addCustomAnnotations(String logLine) throws SQLException { - if (conn.isWrapperFor(PhoenixConnection.class)) { - PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class); - logLine = LogUtil.addCustomAnnotations(logLine, phxConn); - } - return logLine; - } - - /** - * Holds information about a trace - */ - public static class TraceHolder { - public List orphans; - public long traceid; - public TreeSet spans = new TreeSet(); - - @Override - public int hashCode() { - return new Long(traceid).hashCode(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof TraceHolder) { - return traceid == ((TraceHolder) o).traceid; - } - return false; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("Trace: " + traceid + "\n"); - // get the first span, which is always going to be the root span - SpanInfo root = spans.iterator().next(); - if (root.parent != null) { - sb.append("Root span not present! 
Just printing found spans\n"); - for (SpanInfo span : spans) { - sb.append(span.toString() + "\n"); - } - } else { - // print the tree of spans - List toPrint = new ArrayList(); - toPrint.add(root); - while (!toPrint.isEmpty()) { - SpanInfo span = toPrint.remove(0); - sb.append(span.toString() + "\n"); - toPrint.addAll(span.children); - } - } - if (orphans.size() > 0) { - sb.append("Found orphan spans:\n" + orphans); - } - return sb.toString(); - } - } - - public static class SpanInfo implements Comparable { - public SpanInfo parent; - public List children = new ArrayList(); - public String description; - public long id; - public long start; - public long end; - public String hostname; - public int tagCount; - public List tags = new ArrayList(); - public int annotationCount; - public List annotations = new ArrayList(); - private long parentId; - - public SpanInfo(SpanInfo parent, long parentid, long span, String desc, long start, long end, - String host, int tagCount, int annotationCount) { - this.parent = parent; - this.parentId = parentid; - this.id = span; - this.description = desc; - this.start = start; - this.end = end; - this.hostname = host; - this.tagCount = tagCount; - this.annotationCount = annotationCount; - } - - @Override - public int hashCode() { - return new Long(id).hashCode(); - } - - @Override - public boolean equals(Object o) { - if (o instanceof SpanInfo) { - return id == ((SpanInfo) o).id; - } - return false; - } - - /** - * Do the same sorting that we would get from reading the table with a {@link TraceReader}, - * specifically, by trace and then by start/end. However, these are only every stored in a - * single trace, so we can just sort on start/end times. 
- */ - @Override - public int compareTo(SpanInfo o) { - // root span always comes first - if (this.parentId == Span.ROOT_SPAN_ID) { - return -1; - } else if (o.parentId == Span.ROOT_SPAN_ID) { - return 1; - } - - int compare = Longs.compare(start, o.start); - if (compare == 0) { - compare = Longs.compare(end, o.end); - if (compare == 0) { - return Longs.compare(id, o.id); - } - } - return compare; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("Span: " + id + "\n"); - sb.append("\tdescription=" + description); - sb.append("\n"); - sb.append("\tparent=" + (parent == null - ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: " + parentId + "]") - : parent.id)); - sb.append("\n"); - sb.append("\tstart,end=" + start + "," + end); - sb.append("\n"); - sb.append("\telapsed=" + (end - start)); - sb.append("\n"); - sb.append("\thostname=" + hostname); - sb.append("\n"); - sb.append("\ttags=(" + tagCount + ") " + tags); - sb.append("\n"); - sb.append("\tannotations=(" + annotationCount + ") " + annotations); - sb.append("\n"); - sb.append("\tchildren="); - for (SpanInfo child : children) { - sb.append(child.id + ", "); - } - sb.append("\n"); - return sb.toString(); - } - - public long getParentIdForTesting() { - return parentId; - } - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java deleted file mode 100644 index 95bd7de4e85..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceSpanReceiver.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import java.io.IOException; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import org.apache.htrace.Span; -import org.apache.htrace.SpanReceiver; -import org.apache.phoenix.metrics.MetricInfo; -import org.apache.phoenix.query.QueryServicesOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link TraceWriter} in a - * format that we can more easily consume. - *

- *

- * Rather than write directly to a phoenix table, we drop it into the metrics queue so we can more - * cleanly handle it asynchronously.Currently, {@link MilliSpan} submits the span in a synchronized - * block to all the receivers, which could have a lot of overhead if we are submitting to multiple - * receivers. - *

- * The format of the generated metrics is this: - *

    - *
  1. All Metrics from the same span have the same trace id (allowing correlation in the sink)
  2. - *
  3. The description of the metric describes what it contains. For instance, - *
      - *
    • {@link MetricInfo#PARENT} is the id of the parent of this span. (Root span is - * {@link Span#ROOT_SPAN_ID}).
    • - *
    • {@link MetricInfo#START} is the start time of the span
    • - *
    • {@link MetricInfo#END} is the end time of the span
    • - *
    - *
  4. - *
- *

- * So why even submit to {@link TraceWriter} if we only have a single source? - *

- * This allows us to make the updates in batches. We might have spans that finish before other spans - * (for instance in the same parent). By batching the updates we can lessen the overhead on the - * client, which is also busy doing 'real' work.
- * This class is custom implementation of metrics queue and handles batch writes to the Phoenix - * Table via another thread. Batch size and number of threads are configurable. - *

- */ -public class TraceSpanReceiver implements SpanReceiver { - - private static final Logger LOGGER = LoggerFactory.getLogger(TraceSpanReceiver.class); - - private static final int CAPACITY = - QueryServicesOptions.withDefaults().getTracingTraceBufferSize(); - - private BlockingQueue spanQueue = null; - - public TraceSpanReceiver() { - this.spanQueue = new ArrayBlockingQueue(CAPACITY); - } - - @Override - public void receiveSpan(Span span) { - if (span.getTraceId() != 0 && spanQueue.offer(span)) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Span buffered to queue " + span.toJson()); - } - } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) { - LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson()); - } - } - - @Override - public void close() throws IOException { - // noop - } - - boolean isSpanAvailable() { - return spanQueue.isEmpty(); - } - - Span getSpan() { - return spanQueue.poll(); - } - - int getNumSpans() { - return spanQueue.size(); - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java deleted file mode 100644 index a905c961258..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TraceWriter.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION; -import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION; -import static org.apache.phoenix.metrics.MetricInfo.END; -import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME; -import static org.apache.phoenix.metrics.MetricInfo.PARENT; -import static org.apache.phoenix.metrics.MetricInfo.SPAN; -import static org.apache.phoenix.metrics.MetricInfo.START; -import static org.apache.phoenix.metrics.MetricInfo.TAG; -import static org.apache.phoenix.metrics.MetricInfo.TRACE; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.htrace.Span; -import org.apache.htrace.TimelineAnnotation; -import org.apache.phoenix.compile.MutationPlan; -import org.apache.phoenix.execute.MutationState; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData; -import org.apache.phoenix.jdbc.PhoenixPreparedStatement; -import org.apache.phoenix.metrics.MetricInfo; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.schema.TableNotFoundException; -import 
org.apache.phoenix.trace.util.Tracing; -import org.apache.phoenix.util.QueryUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.phoenix.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.phoenix.thirdparty.com.google.common.base.Joiner; -import org.apache.phoenix.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - -/** - * Sink for the trace spans pushed into the queue by {@link TraceSpanReceiver}. The class - * instantiates a thread pool of configurable size, which will pull the data from queue and write to - * the Phoenix Trace Table in batches. Various configuration options include thread pool size and - * batch commit size. - */ -public class TraceWriter { - private static final Logger LOGGER = LoggerFactory.getLogger(TraceWriter.class); - - private static final String VARIABLE_VALUE = "?"; - - private static final Joiner COLUMN_JOIN = Joiner.on("."); - static final String TAG_FAMILY = "tags"; - /** - * Count of the number of tags we are storing for this row - */ - static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count"); - - static final String ANNOTATION_FAMILY = "annotations"; - static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count"); - - /** - * Join strings on a comma - */ - private static final Joiner COMMAS = Joiner.on(','); - - private String tableName; - private int batchSize; - private int numThreads; - private TraceSpanReceiver traceSpanReceiver; - - protected ScheduledExecutorService executor; - - public TraceWriter(String tableName, int numThreads, int batchSize) { - - this.batchSize = batchSize; - this.numThreads = numThreads; - this.tableName = tableName; - } - - public void start() { - - traceSpanReceiver = getTraceSpanReceiver(); - if (traceSpanReceiver == null) { - LOGGER.warn("No receiver has been initialized for TraceWriter. 
Traces will not be written."); - LOGGER.warn("Restart Phoenix to try again."); - return; - } - - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setDaemon(true).setNameFormat("PHOENIX-METRICS-WRITER"); - executor = Executors.newScheduledThreadPool(this.numThreads, builder.build()); - - for (int i = 0; i < this.numThreads; i++) { - executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS); - } - - LOGGER.info("Writing tracing metrics to phoenix table"); - } - - @VisibleForTesting - protected TraceSpanReceiver getTraceSpanReceiver() { - return Tracing.getTraceSpanReceiver(); - } - - public class FlushMetrics implements Runnable { - - private Connection conn; - private int counter = 0; - - public FlushMetrics() { - conn = getConnection(tableName); - } - - @Override - public void run() { - if (conn == null) return; - while (!traceSpanReceiver.isSpanAvailable()) { - Span span = traceSpanReceiver.getSpan(); - if (null == span) break; - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Span received: " + span.toJson()); - } - addToBatch(span); - counter++; - if (counter >= batchSize) { - commitBatch(conn); - counter = 0; - } - } - } - - private void addToBatch(Span span) { - - String stmt = "UPSERT INTO " + tableName + " ("; - // drop it into the queue of things that should be written - List keys = new ArrayList(); - List values = new ArrayList(); - // we need to keep variable values in a separate set since they may have spaces, which - // causes the parser to barf. 
Instead, we need to add them after the statement is - // prepared - List variableValues = new ArrayList(); - keys.add(TRACE.columnName); - values.add(span.getTraceId()); - - keys.add(DESCRIPTION.columnName); - values.add(VARIABLE_VALUE); - variableValues.add(span.getDescription()); - - keys.add(SPAN.traceName); - values.add(span.getSpanId()); - - keys.add(PARENT.traceName); - values.add(span.getParentId()); - - keys.add(START.traceName); - values.add(span.getStartTimeMillis()); - - keys.add(END.traceName); - values.add(span.getStopTimeMillis()); - - int annotationCount = 0; - int tagCount = 0; - - // add the tags to the span. They were written in order received so we mark them as such - for (TimelineAnnotation ta : span.getTimelineAnnotations()) { - addDynamicEntry(keys, values, variableValues, TAG_FAMILY, Long.toString(ta.getTime()), - ta.getMessage(), TAG, tagCount); - tagCount++; - } - - // add the annotations. We assume they are serialized as strings and integers, but that - // can - // change in the future - Map annotations = span.getKVAnnotations(); - for (Map.Entry annotation : annotations.entrySet()) { - Pair val = - TracingUtils.readAnnotation(annotation.getKey(), annotation.getValue()); - addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, val.getFirst(), - val.getSecond(), ANNOTATION, annotationCount); - annotationCount++; - } - - // add the tag count, now that we know it - keys.add(TAG_COUNT); - // ignore the hostname in the tags, if we know it - values.add(tagCount); - - keys.add(ANNOTATION_COUNT); - values.add(annotationCount); - - // compile the statement together - stmt += COMMAS.join(keys); - stmt += ") VALUES (" + COMMAS.join(values) + ")"; - - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Logging metrics to phoenix table via: " + stmt); - LOGGER.trace("With tags: " + variableValues); - } - try (PreparedStatement ps = conn.prepareStatement(stmt)) { - // add everything that wouldn't/may not parse - int index = 1; - for (String tag : 
variableValues) { - ps.setString(index++, tag); - } - - // Not going through the standard route of using statement.execute() as that code - // path - // is blocked if the metadata hasn't been been upgraded to the new minor release. - MutationPlan plan = ps.unwrap(PhoenixPreparedStatement.class).compileMutation(stmt); - MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState(); - MutationState newState = plan.execute(); - state.join(newState); - } catch (SQLException e) { - LOGGER.error("Could not write metric: \n" + span + " to prepared statement:\n" + stmt, e); - } - } - } - - public static String getDynamicColumnName(String family, String column, int count) { - return COLUMN_JOIN.join(family, column) + count; - } - - private void addDynamicEntry(List keys, List values, List variableValues, - String family, String desc, String value, MetricInfo metric, int count) { - // <.dynColumn> - keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR"); - - // build the annotation value - String val = desc + " - " + value; - values.add(VARIABLE_VALUE); - variableValues.add(val); - } - - protected Connection getConnection(String tableName) { - - try { - // create the phoenix connection - Properties props = new Properties(); - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, Tracing.Frequency.NEVER.getKey()); - Configuration conf = HBaseConfiguration.create(); - Connection conn = QueryUtil.getConnectionOnServer(props, conf); - - if (!traceTableExists(conn, tableName)) { - createTable(conn, tableName); - } - - LOGGER.info("Created new connection for tracing " + conn.toString() + " Table: " + tableName); - return conn; - } catch (Exception e) { - LOGGER.error( - "Tracing will NOT be pursued. 
New connection failed for tracing Table: " + tableName, e); - LOGGER.error("Restart Phoenix to retry."); - return null; - } - } - - protected boolean traceTableExists(Connection conn, String traceTableName) throws SQLException { - try { - conn.unwrap(PhoenixConnection.class).getTable(traceTableName); - return true; - } catch (TableNotFoundException e) { - return false; - } - } - - /** - * Create a stats table with the given name. Stores the name for use later when creating upsert - * statements - * @param conn connection to use when creating the table - * @param table name of the table to create - * @throws SQLException if any phoenix operations fails - */ - protected void createTable(Connection conn, String table) throws SQLException { - // only primary-key columns can be marked non-null - String ddl = "create table if not exists " + table + "( " + TRACE.columnName - + " bigint not null, " + PARENT.columnName + " bigint not null, " + SPAN.columnName - + " bigint not null, " + DESCRIPTION.columnName + " varchar, " + START.columnName - + " bigint, " + END.columnName + " bigint, " + HOSTNAME.columnName + " varchar, " + TAG_COUNT - + " smallint, " + ANNOTATION_COUNT + " smallint" + " CONSTRAINT pk PRIMARY KEY (" - + TRACE.columnName + ", " + PARENT.columnName + ", " + SPAN.columnName + "))\n" + - // We have a config parameter that can be set so that tables are - // transactional by default. 
If that's set, we still don't want these system - // tables created as transactional tables, make these table non - // transactional - PhoenixDatabaseMetaData.TRANSACTIONAL + "=" + Boolean.FALSE; - PreparedStatement stmt = conn.prepareStatement(ddl); - stmt.execute(); - } - - protected void commitBatch(Connection conn) { - try { - conn.commit(); - } catch (SQLException e) { - LOGGER.error( - "Unable to commit traces on conn: " + conn.toString() + " to table: " + tableName, e); - } - } - -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java index 95fad9fb52e..f713b5c3f56 100644 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java +++ b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingIterator.java @@ -17,39 +17,53 @@ */ package org.apache.phoenix.trace; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; +import io.opentelemetry.context.Scope; import java.sql.SQLException; -import org.apache.htrace.TraceScope; import org.apache.phoenix.iterate.DelegateResultIterator; import org.apache.phoenix.iterate.ResultIterator; import org.apache.phoenix.schema.tuple.Tuple; /** - * A simple iterator that closes the trace scope when the iterator is closed. + * A result iterator that manages an OpenTelemetry span lifecycle. The span is ended when the + * iterator is closed. Events are added to the span as results are iterated. 
*/ public class TracingIterator extends DelegateResultIterator { - private TraceScope scope; + private final Span span; + private final Scope scope; private boolean started; /** - * @param scope a scope with a non-null span - * @param iterator delegate + * @param span the OpenTelemetry span to manage + * @param scope the scope that makes the span current + * @param iterator delegate iterator */ - public TracingIterator(TraceScope scope, ResultIterator iterator) { + public TracingIterator(Span span, Scope scope, ResultIterator iterator) { super(iterator); + this.span = span; this.scope = scope; } @Override public void close() throws SQLException { - scope.close(); + try { + span.setStatus(StatusCode.OK); + } finally { + try { + scope.close(); + } finally { + span.end(); + } + } super.close(); } @Override public Tuple next() throws SQLException { if (!started) { - scope.getSpan().addTimelineAnnotation("First request completed"); + span.addEvent("First request completed"); started = true; } return super.next(); @@ -57,6 +71,6 @@ public Tuple next() throws SQLException { @Override public String toString() { - return "TracingIterator [scope=" + scope + ", started=" + started + "]"; + return "TracingIterator [span=" + span + ", started=" + started + "]"; } } diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java deleted file mode 100644 index 9af7905634c..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/TracingUtils.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import java.nio.charset.StandardCharsets; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.htrace.Span; - -/** - * Utilities for tracing - */ -public class TracingUtils { - public static final String METRIC_SOURCE_KEY = "phoenix."; - - /** Set context to enable filtering */ - public static final String METRICS_CONTEXT = "tracing"; - - /** Marker metric to ensure that we register the tracing mbeans */ - public static final String METRICS_MARKER_CONTEXT = "marker"; - - public static void addAnnotation(Span span, String message, int value) { - span.addKVAnnotation(message.getBytes(StandardCharsets.UTF_8), - Bytes.toBytes(Integer.toString(value))); - } - - public static Pair readAnnotation(byte[] key, byte[] value) { - return new Pair(new String(key, StandardCharsets.UTF_8), Bytes.toString(value)); - } - - /** - * @see #getTraceMetricName(String) - */ - public static final String getTraceMetricName(long traceId) { - return getTraceMetricName(Long.toString(traceId)); - } - - /** - * @param traceId unique id of the trace - * @return the name of the metric record that should be generated for a given trace - */ - public static final String getTraceMetricName(String traceId) { - return METRIC_SOURCE_KEY + traceId; - } -} diff --git 
a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java deleted file mode 100644 index 0646feeeefa..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/ConfigurationAdapter.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace.util; - -import org.apache.hadoop.conf.Configuration; -import org.apache.phoenix.jdbc.PhoenixConnection; - -/** - * Helper class to wrap access to configured properties. 
- */ -abstract class ConfigurationAdapter { - - public abstract String get(String key, String defaultValue); - - public static class ConnectionConfigurationAdapter extends ConfigurationAdapter { - private PhoenixConnection conn; - - public ConnectionConfigurationAdapter(PhoenixConnection connection) { - this.conn = connection; - } - - @Override - public String get(String key, String defaultValue) { - return conn.getQueryServices().getProps().get(key, defaultValue); - } - } - - public static class HadoopConfigConfigurationAdapter extends ConfigurationAdapter { - private Configuration conf; - - public HadoopConfigConfigurationAdapter(Configuration conf) { - this.conf = conf; - } - - @Override - public String get(String key, String defaultValue) { - return conf.get(key, defaultValue); - } - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java deleted file mode 100644 index 80fca8d713e..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/NullSpan.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.phoenix.trace.util; - -import java.util.Collections; -import java.util.List; -import java.util.Map; -import org.apache.htrace.Span; -import org.apache.htrace.TimelineAnnotation; -import org.apache.phoenix.util.StringUtil; - -/** - * Fake {@link Span} that doesn't save any state, in place of null return values, to avoid - * null check. - */ -public class NullSpan implements Span { - - public static final Span INSTANCE = new NullSpan(); - - /** - * Private constructor to limit garbage - */ - private NullSpan() { - } - - @Override - public void stop() { - } - - @Override - public long getStartTimeMillis() { - return 0; - } - - @Override - public long getStopTimeMillis() { - return 0; - } - - @Override - public long getAccumulatedMillis() { - return 0; - } - - @Override - public boolean isRunning() { - return false; - } - - @Override - public String getDescription() { - return "NullSpan"; - } - - @Override - public long getSpanId() { - return 0; - } - - @Override - public long getTraceId() { - return 0; - } - - @Override - public Span child(String description) { - return INSTANCE; - } - - @Override - public long getParentId() { - return 0; - } - - @Override - public void addKVAnnotation(byte[] key, byte[] value) { - } - - @Override - public void addTimelineAnnotation(String msg) { - } - - @Override - public Map getKVAnnotations() { - return Collections.emptyMap(); - } - - @Override - public List getTimelineAnnotations() { - return Collections.emptyList(); - } - - @Override - public String getProcessId() { - return null; - } - - @Override - public String toJson() { - return StringUtil.EMPTY_STRING; - } -} diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java deleted file mode 100644 index e6381068978..00000000000 --- a/phoenix-core-client/src/main/java/org/apache/phoenix/trace/util/Tracing.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - 
* Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace.util; - -import static org.apache.phoenix.util.StringUtil.toBytes; - -import edu.umd.cs.findbugs.annotations.NonNull; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.Callable; -import javax.annotation.Nullable; -import org.apache.hadoop.conf.Configuration; -import org.apache.htrace.HTraceConfiguration; -import org.apache.htrace.Sampler; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; -import org.apache.htrace.Tracer; -import org.apache.htrace.impl.ProbabilitySampler; -import org.apache.htrace.wrappers.TraceCallable; -import org.apache.htrace.wrappers.TraceRunnable; -import org.apache.phoenix.call.CallRunner; -import org.apache.phoenix.call.CallWrapper; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.parse.TraceStatement; -import org.apache.phoenix.query.QueryServices; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.trace.TraceSpanReceiver; -import org.apache.phoenix.trace.TraceWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import 
org.apache.phoenix.thirdparty.com.google.common.base.Function; -import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions; - -/** - * Helper class to manage using the {@link Tracer} within Phoenix - */ -public class Tracing { - - private static final Logger LOGGER = LoggerFactory.getLogger(Tracing.class); - - private static final String SEPARATOR = "."; - // Constants for tracing across the wire - public static final String TRACE_ID_ATTRIBUTE_KEY = "phoenix.trace.traceid"; - public static final String SPAN_ID_ATTRIBUTE_KEY = "phoenix.trace.spanid"; - - // Constants for passing into the metrics system - private static final String TRACE_METRIC_PREFIX = "phoenix.trace.instance"; - - /** - * Manage the types of frequencies that we support. By default, we never turn on tracing. - */ - public static enum Frequency { - NEVER("never", CREATE_NEVER), // default - ALWAYS("always", CREATE_ALWAYS), - PROBABILITY("probability", CREATE_PROBABILITY); - - String key; - Function> builder; - - private Frequency(String key, Function> builder) { - this.key = key; - this.builder = builder; - } - - public String getKey() { - return key; - } - - static Frequency getSampler(String key) { - for (Frequency type : Frequency.values()) { - if (type.key.equals(key)) { - return type; - } - } - return NEVER; - } - } - - private static Function> CREATE_ALWAYS = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter arg0) { - return Sampler.ALWAYS; - } - }; - - private static Function> CREATE_NEVER = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter arg0) { - return Sampler.NEVER; - } - }; - - private static Function> CREATE_PROBABILITY = - new Function>() { - @Override - public Sampler apply(ConfigurationAdapter conf) { - // get the connection properties for the probability information - Map items = new HashMap(); - items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, - conf.get(QueryServices.TRACING_PROBABILITY_THRESHOLD_ATTRIB, - 
Double.toString(QueryServicesOptions.DEFAULT_TRACING_PROBABILITY_THRESHOLD))); - return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); - } - }; - - public static Sampler getConfiguredSampler(PhoenixConnection connection) { - String tracelevel = connection.getQueryServices().getProps() - .get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); - return getSampler(tracelevel, - new ConfigurationAdapter.ConnectionConfigurationAdapter(connection)); - } - - public static Sampler getConfiguredSampler(Configuration conf) { - String tracelevel = - conf.get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ); - return getSampler(tracelevel, new ConfigurationAdapter.HadoopConfigConfigurationAdapter(conf)); - } - - public static Sampler getConfiguredSampler(TraceStatement traceStatement) { - double samplingRate = traceStatement.getSamplingRate(); - if (samplingRate >= 1.0) { - return Sampler.ALWAYS; - } else if (samplingRate < 1.0 && samplingRate > 0.0) { - Map items = new HashMap(); - items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate)); - return new ProbabilitySampler(HTraceConfiguration.fromMap(items)); - } else { - return Sampler.NEVER; - } - } - - private static Sampler getSampler(String traceLevel, ConfigurationAdapter conf) { - return Frequency.getSampler(traceLevel).builder.apply(conf); - } - - public static void setSampling(Properties props, Frequency freq) { - props.setProperty(QueryServices.TRACING_FREQ_ATTRIB, freq.key); - } - - /** - * Start a span with the currently configured sampling frequency. Creates a new 'current' span on - * this thread - the previous 'current' span will be replaced with this newly created span. - *

- * Hands back the direct span as you shouldn't be detaching the span - use {@link TraceRunnable} - * instead to detach a span from this operation. - * @param connection from which to read parameters - * @param string description of the span to start - * @return the underlying span. - */ - public static TraceScope startNewSpan(PhoenixConnection connection, String string) { - Sampler sampler = connection.getSampler(); - TraceScope scope = Trace.startSpan(string, sampler); - addCustomAnnotationsToSpan(scope.getSpan(), connection); - return scope; - } - - public static String getSpanName(Span span) { - return Tracing.TRACE_METRIC_PREFIX + span.getTraceId() + SEPARATOR + span.getParentId() - + SEPARATOR + span.getSpanId(); - } - - public static Span child(Span s, String d) { - if (s == null) { - return NullSpan.INSTANCE; - } - return s.child(d); - } - - /** - * Wrap the callable in a TraceCallable, if tracing. - * @param callable to call - * @param description description of the operation being run. If null uses the current - * thread name - * @return The callable provided, wrapped if tracing, 'callable' if not. - */ - public static Callable wrap(Callable callable, String description) { - if (Trace.isTracing()) { - return new TraceCallable(Trace.currentSpan(), callable, description); - } - return callable; - } - - /** - * Helper to automatically start and complete tracing on the given method, used in conjuction with - * {@link CallRunner#run}. - *

- * This will always attempt start a new span (which will always start, unless the {@link Sampler} - * says it shouldn't be traced). If you are just looking for flexible tracing that only turns on - * if the current thread/query is already tracing, use {@link #wrap(Callable, String)} or - * {@link Trace#wrap(Callable)}. - *

- * Ensures that the trace is closed, even if there is an exception from the - * {@link org.apache.phoenix.call.CallRunner.CallableThrowable}. - *

- * Generally, this should wrap a long-running operation. - * @param conn connection from which to determine if we are tracing, ala - * {@link #startNewSpan(PhoenixConnection, String)} - * @param desc description of the operation being run - * @return the value returned from the call - */ - public static CallWrapper withTracing(PhoenixConnection conn, String desc) { - return new TracingWrapper(conn, desc); - } - - private static void addCustomAnnotationsToSpan(@Nullable Span span, - @NonNull PhoenixConnection conn) { - Preconditions.checkNotNull(conn); - - if (span == null) { - return; - } - Map annotations = conn.getCustomTracingAnnotations(); - // copy over the annotations as bytes - for (Map.Entry annotation : annotations.entrySet()) { - span.addKVAnnotation(toBytes(annotation.getKey()), toBytes(annotation.getValue())); - } - } - - private static class TracingWrapper implements CallWrapper { - private TraceScope scope; - private final PhoenixConnection conn; - private final String desc; - - public TracingWrapper(PhoenixConnection conn, String desc) { - this.conn = conn; - this.desc = desc; - } - - @Override - public void before() { - scope = Tracing.startNewSpan(conn, "Executing " + desc); - } - - @Override - public void after() { - scope.close(); - } - } - - /** - * Track if the tracing system has been initialized for phoenix - */ - private static boolean initialized = false; - private static TraceSpanReceiver traceSpanReceiver = null; - - /** - * Add the phoenix span receiver so we can log the traces. 
We have a single trace source for the - * whole JVM - */ - public synchronized static void addTraceMetricsSource() { - try { - QueryServicesOptions options = QueryServicesOptions.withDefaults(); - if (!initialized && options.isTracingEnabled()) { - traceSpanReceiver = new TraceSpanReceiver(); - Trace.addReceiver(traceSpanReceiver); - TraceWriter traceWriter = new TraceWriter(options.getTableName(), - options.getTracingThreadPoolSize(), options.getTracingBatchSize()); - traceWriter.start(); - } - } catch (RuntimeException e) { - LOGGER.warn("Tracing will outputs will not be written to any metrics sink! No " - + "TraceMetricsSink found on the classpath", e); - } catch (IllegalAccessError e) { - // This is an issue when we have a class incompatibility error, such as when running - // within SquirrelSQL which uses an older incompatible version of commons-collections. - // Seeing as this only results in disabling tracing, we swallow this exception and just - // continue on without tracing. - LOGGER.warn("Class incompatibility while initializing metrics, metrics will be disabled", e); - } - initialized = true; - } - - public static TraceSpanReceiver getTraceSpanReceiver() { - return traceSpanReceiver; - } - - public static boolean isTraceOn(String traceOption) { - Preconditions.checkArgument(traceOption != null); - if (traceOption.equalsIgnoreCase("ON")) return true; - if (traceOption.equalsIgnoreCase("OFF")) return false; - else { - throw new IllegalArgumentException("Unknown tracing option: " + traceOption); - } - } - - /** - * Check whether tracing is generally enabled. 
- * @return true If tracing is enabled, false otherwise - */ - public static boolean isTracing() { - return Trace.isTracing(); - } -} diff --git a/phoenix-core-server/pom.xml b/phoenix-core-server/pom.xml index e3abcb3558b..2460e9f6731 100644 --- a/phoenix-core-server/pom.xml +++ b/phoenix-core-server/pom.xml @@ -125,10 +125,6 @@ com.github.stephenc.findbugs findbugs-annotations - - org.apache.htrace - htrace-core - com.google.protobuf protobuf-java @@ -173,6 +169,21 @@ com.google.code.findbugs jsr305 + + + + io.opentelemetry + opentelemetry-api + provided + + + + io.opentelemetry + opentelemetry-context + provided + diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java index 723eaee0391..68884c70afd 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java @@ -20,6 +20,8 @@ import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.CDC_DATA_TABLE_DEF; import static org.apache.phoenix.util.ScanUtil.getPageSizeMsForFilter; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -48,8 +50,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; import org.apache.phoenix.execute.TupleProjector; import org.apache.phoenix.filter.PagingFilter; @@ -60,6 +60,7 @@ import org.apache.phoenix.query.QueryServices; import 
org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.StaleRegionBoundaryCacheException; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.util.CDCUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.ScanUtil; @@ -212,10 +213,9 @@ private void overrideDelegate() throws IOException { // and region servers to crash. See https://issues.apache.org/jira/browse/PHOENIX-1596 // TraceScope can't be used here because closing the scope will end up calling // currentSpan.stop() and that should happen only when we are closing the scanner. - final Span savedSpan = Trace.currentSpan(); final Span child = - Trace.startSpan(BaseScannerRegionObserverConstants.SCANNER_OPENED_TRACE_INFO, savedSpan) - .getSpan(); + PhoenixTracing.createSpan(BaseScannerRegionObserverConstants.SCANNER_OPENED_TRACE_INFO); + final Scope childScope = child.makeCurrent(); try { RegionScanner scanner = doPostScannerOpen(c, scan, delegate); scanner = new DelegateRegionScanner(scanner) { @@ -227,7 +227,7 @@ public void close() throws IOException { delegate.close(); } finally { if (child != null) { - child.stop(); + child.end(); } } } @@ -240,10 +240,10 @@ public void close() throws IOException { } finally { try { if (!success && child != null) { - child.stop(); + child.end(); } } finally { - Trace.continueSpan(savedSpan); + childScope.close(); } } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java index 9d3d0b3ca47..32222b13d5f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java @@ -271,7 +271,6 @@ import org.apache.phoenix.schema.types.PTinyint; import org.apache.phoenix.schema.types.PVarbinary; import 
org.apache.phoenix.schema.types.PVarchar; -import org.apache.phoenix.trace.util.Tracing; import org.apache.phoenix.transaction.TransactionFactory; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.CDCUtil; @@ -688,7 +687,7 @@ public void start(CoprocessorEnvironment env) throws IOException { LOGGER.info("Starting Tracing-Metrics Systems"); // Start the phoenix trace collection - Tracing.addTraceMetricsSource(); + // OpenTelemetry tracing is initialized via the Java Agent at runtime. Metrics.ensureConfigured(); metricsSource = MetricsMetadataSourceFactory.getMetadataMetricsSource(); GlobalCache.getInstance(this.env).setMetricsSource(metricsSource); diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java index fc55b2435dc..ed05613ac35 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/IndexRegionObserver.java @@ -27,6 +27,8 @@ import static org.apache.phoenix.index.PhoenixIndexBuilderHelper.RETURN_RESULT; import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.EOFException; @@ -79,9 +81,6 @@ import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.io.WritableUtils; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; import org.apache.phoenix.compile.ScanRanges; import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment; import org.apache.phoenix.coprocessor.generated.PTableProtos; @@ -129,8 +128,7 @@ import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PInteger; 
import org.apache.phoenix.schema.types.PVarbinary; -import org.apache.phoenix.trace.TracingUtils; -import org.apache.phoenix.trace.util.NullSpan; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.util.ByteUtil; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EncodedColumnsUtil; @@ -1276,12 +1274,9 @@ private void preparePreIndexMutations(BatchMutateContext context, long batchTime PhoenixIndexMetaData indexMetaData) throws Throwable { List maintainers = indexMetaData.getIndexMaintainers(); // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - current.addTimelineAnnotation("Built index updates, doing preStep"); + Span current = PhoenixTracing.createSpan("phoenix.index.build.updates"); + try (Scope ignored = current.makeCurrent()) { + current.addEvent("Built index updates, doing preStep"); // The rest of this method is for handling global index updates context.indexUpdates = ArrayListMultimap.> create(); @@ -1315,7 +1310,9 @@ private void preparePreIndexMutations(BatchMutateContext context, long batchTime } } } - TracingUtils.addAnnotation(current, "index update count", updateCount); + current.setAttribute("phoenix.index.update.count", updateCount); + } finally { + current.end(); } } @@ -1873,19 +1870,17 @@ private void doIndexWritesWithExceptions(BatchMutateContext context, boolean pos } // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = - Trace.startSpan("Completing " + (post ? "post" : "pre") + " index writes")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } - current.addTimelineAnnotation( - "Actually doing " + (post ? 
"post" : "pre") + " index update for first time"); + Span current = PhoenixTracing.createSpan("phoenix.index.write." + (post ? "post" : "pre")); + try (Scope ignored = current.makeCurrent()) { + current + .addEvent("Actually doing " + (post ? "post" : "pre") + " index update for first time"); if (post) { postWriter.write(indexUpdates, false, context.clientVersion); } else { preWriter.write(indexUpdates, false, context.clientVersion); } + } finally { + current.end(); } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java index e33188c3fb7..f61ad157159 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/Indexer.java @@ -19,6 +19,8 @@ import static org.apache.phoenix.hbase.index.util.IndexManagementUtil.rethrowIndexingException; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -51,9 +53,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment; import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.ReplayWrite; import org.apache.phoenix.hbase.index.LockManager.RowLock; @@ -73,8 +72,7 @@ import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache; import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy; import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.trace.TracingUtils; -import org.apache.phoenix.trace.util.NullSpan; +import org.apache.phoenix.trace.PhoenixTracing; import 
org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.apache.phoenix.util.ScanUtil; @@ -488,27 +486,30 @@ public void preBatchMutateWithExceptions(ObserverContext> indexUpdates = null; + Span current = PhoenixTracing.createSpan("phoenix.index.build.updates"); + try (Scope ignored = current.makeCurrent()) { long start = EnvironmentEdgeManager.currentTimeMillis(); // get the index updates for all elements in this batch - Collection> indexUpdates = - this.builder.getIndexUpdate(miniBatchOp, mutations); + indexUpdates = this.builder.getIndexUpdate(miniBatchOp, mutations); long duration = EnvironmentEdgeManager.currentTimeMillis() - start; if (duration >= slowIndexPrepareThreshold) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold)); + LOGGER.debug(getCallTooSlowMessage("preBatchMutateWithExceptions", duration, + slowIndexPrepareThreshold)); } metricSource.incrementNumSlowIndexPrepareCalls(dataTableName); } metricSource.updateIndexPrepareTime(dataTableName, duration); - current.addTimelineAnnotation("Built index updates, doing preStep"); - TracingUtils.addAnnotation(current, "index update count", indexUpdates.size()); + current.addEvent("Built index updates, doing preStep"); + current.setAttribute("phoenix.index.update.count", (long) indexUpdates.size()); + } finally { + current.end(); + } + + if (indexUpdates != null) { byte[] tableName = c.getEnvironment().getRegion().getTableDescriptor().getTableName().getName(); Iterator> indexUpdatesItr = indexUpdates.iterator(); @@ -605,14 +606,11 @@ private void doPostWithExceptions(ObserverContext } // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Completing index writes")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } + Span current = 
PhoenixTracing.createSpan("phoenix.index.write.complete"); + try (Scope ignored = current.makeCurrent()) { long start = EnvironmentEdgeManager.currentTimeMillis(); - current.addTimelineAnnotation("Actually doing index update for first time"); + current.addEvent("Actually doing index update for first time"); writer.writeAndHandleFailure(context.indexUpdates, false, context.clientVersion); long duration = EnvironmentEdgeManager.currentTimeMillis() - start; @@ -623,6 +621,8 @@ private void doPostWithExceptions(ObserverContext metricSource.incrementNumSlowIndexWriteCalls(dataTableName); } metricSource.updateIndexWriteTime(dataTableName, duration); + } finally { + current.end(); } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java index 212f15b8bac..d9261b82cad 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/hbase/index/LockManager.java @@ -17,15 +17,16 @@ */ package org.apache.phoenix.hbase.index; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.IOException; import java.io.InterruptedIOException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,12 +54,14 @@ public LockManager() { */ public RowLock lockRow(ImmutableBytesPtr rowKey, long waitDurationMs) throws IOException { RowLockImpl rowLock = new RowLockImpl(rowKey); - TraceScope traceScope = null; + 
Span span = null; + Scope scope = null; // If we're tracing start a span to show how long this took. - if (Trace.isTracing()) { - traceScope = Trace.startSpan("LockManager.lockRow"); - traceScope.getSpan().addTimelineAnnotation("Getting a row lock"); + if (PhoenixTracing.isRecording()) { + span = PhoenixTracing.createSpan("phoenix.lock.row"); + scope = span.makeCurrent(); + span.addEvent("Getting a row lock"); } boolean success = false; try { @@ -93,11 +96,12 @@ public RowLock lockRow(ImmutableBytesPtr rowKey, long waitDurationMs) throws IOE Thread.currentThread().interrupt(); throw iie; } finally { - if (traceScope != null) { + if (span != null) { if (!success) { - traceScope.getSpan().addTimelineAnnotation("Failed to get row lock"); + span.addEvent("Failed to get row lock"); } - traceScope.close(); + if (scope != null) scope.close(); + span.end(); } } } diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java index c45b93eb115..e5652c8537f 100644 --- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java +++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java @@ -17,6 +17,8 @@ */ package org.apache.phoenix.index; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.context.Scope; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -34,17 +36,13 @@ import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.TraceScope; import org.apache.phoenix.coprocessor.DelegateRegionCoprocessorEnvironment; import org.apache.phoenix.coprocessorclient.MetaDataProtocol; import 
org.apache.phoenix.execute.PhoenixTxIndexMutationGenerator; import org.apache.phoenix.hbase.index.write.IndexWriter; import org.apache.phoenix.hbase.index.write.LeaveIndexActiveFailurePolicy; import org.apache.phoenix.hbase.index.write.ParallelWriterIndexCommitter; -import org.apache.phoenix.trace.TracingUtils; -import org.apache.phoenix.trace.util.NullSpan; +import org.apache.phoenix.trace.PhoenixTracing; import org.apache.phoenix.transaction.PhoenixTransactionContext; import org.apache.phoenix.util.ClientUtil; import org.apache.phoenix.util.ServerUtil.ConnectionType; @@ -157,11 +155,8 @@ public void preBatchMutate(ObserverContext c, Collection> indexUpdates = null; // get the current span, or just use a null-span to avoid a bunch of if statements - try (TraceScope scope = Trace.startSpan("Starting to build index updates")) { - Span current = scope.getSpan(); - if (current == null) { - current = NullSpan.INSTANCE; - } + Span current = PhoenixTracing.createSpan("phoenix.index.build.updates"); + try (Scope ignored = current.makeCurrent()) { RegionCoprocessorEnvironment env = c.getEnvironment(); PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext(); @@ -196,12 +191,15 @@ public void preBatchMutate(ObserverContext c, context.indexUpdates = indexUpdates; } - current.addTimelineAnnotation("Built index updates, doing preStep"); - TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size()); + current.addEvent("Built index updates, doing preStep"); + current.setAttribute("phoenix.index.update.count", (long) context.indexUpdates.size()); } catch (Throwable t) { + PhoenixTracing.setError(current, t); String msg = "Failed to update index with entries:" + indexUpdates; LOGGER.error(msg, t); ClientUtil.throwIOException(msg, t); + } finally { + current.end(); } } @@ -213,23 +211,22 @@ public void postBatchMutateIndispensably(ObserverContextprotobuf-java test - - org.apache.htrace - htrace-core - test - org.slf4j slf4j-api diff 
--git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java deleted file mode 100644 index 98b84395ffb..00000000000 --- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.phoenix.trace; - -import static org.apache.phoenix.util.PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Collections; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.impl.MilliSpan; -import org.apache.phoenix.end2end.ParallelStatsDisabledIT; -import org.apache.phoenix.jdbc.DelegateConnection; -import org.apache.phoenix.trace.util.Tracing; -import org.apache.phoenix.trace.util.Tracing.Frequency; -import org.apache.phoenix.util.PhoenixRuntime; -import org.apache.phoenix.util.PropertiesUtil; -import org.junit.After; -import org.junit.Before; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Base test for tracing tests - helps manage getting tracing/non-tracing connections, as well as - * any supporting utils. 
- */ - -public abstract class BaseTracingTestIT extends ParallelStatsDisabledIT { - - private static final Logger LOGGER = LoggerFactory.getLogger(BaseTracingTestIT.class); - - protected CountDownLatch latch; - protected int defaultTracingThreadPoolForTest = 1; - protected int defaultTracingBatchSizeForTest = 1; - protected String tracingTableName; - protected TraceSpanReceiver traceSpanReceiver = null; - protected TestTraceWriter testTraceWriter = null; - - @Before - public void setup() { - tracingTableName = "TRACING_" + generateUniqueName(); - traceSpanReceiver = new TraceSpanReceiver(); - Trace.addReceiver(traceSpanReceiver); - testTraceWriter = new TestTraceWriter(tracingTableName, defaultTracingThreadPoolForTest, - defaultTracingBatchSizeForTest); - } - - @After - public void cleanUp() { - Trace.removeReceiver(traceSpanReceiver); - if (testTraceWriter != null) testTraceWriter.stop(); - } - - public static Connection getConnectionWithoutTracing() throws SQLException { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - return getConnectionWithoutTracing(props); - } - - public static Connection getConnectionWithoutTracing(Properties props) throws SQLException { - Connection conn = getConnectionWithTracingFrequency(props, Frequency.NEVER); - return conn; - } - - public static Connection getTracingConnection() throws Exception { - return getTracingConnection(Collections. 
emptyMap(), null); - } - - public static Connection getTracingConnection(Map customAnnotations, - String tenantId) throws Exception { - Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); - for (Map.Entry annot : customAnnotations.entrySet()) { - props.put(ANNOTATION_ATTRIB_PREFIX + annot.getKey(), annot.getValue()); - } - if (tenantId != null) { - props.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); - } - return getConnectionWithTracingFrequency(props, Tracing.Frequency.ALWAYS); - } - - public static Connection getConnectionWithTracingFrequency(Properties props, - Tracing.Frequency frequency) throws SQLException { - Tracing.setSampling(props, frequency); - return DriverManager.getConnection(getUrl(), props); - } - - protected Span createNewSpan(long traceid, long parentid, long spanid, String description, - long startTime, long endTime, String processid, String... tags) { - - Span span = new MilliSpan.Builder().description(description).traceId(traceid) - .parents(new long[] { parentid }).spanId(spanid).processId(processid).begin(startTime) - .end(endTime).build(); - - int tagCount = 0; - for (String annotation : tags) { - span.addKVAnnotation((Integer.toString(tagCount++)).getBytes(), annotation.getBytes()); - } - return span; - } - - private static class CountDownConnection extends DelegateConnection { - private CountDownLatch commit; - - public CountDownConnection(Connection conn, CountDownLatch commit) { - super(conn); - this.commit = commit; - } - - @Override - public void commit() throws SQLException { - super.commit(); - commit.countDown(); - } - - } - - protected class TestTraceWriter extends TraceWriter { - - public TestTraceWriter(String tableName, int numThreads, int batchSize) { - super(tableName, numThreads, batchSize); - } - - @Override - protected Connection getConnection(String tableName) { - try { - Connection connection = new CountDownConnection(getConnectionWithoutTracing(), latch); - if (!traceTableExists(connection, tableName)) { - 
createTable(connection, tableName); - } - return connection; - } catch (SQLException e) { - LOGGER.error("New connection failed for tracing Table: " + tableName, e); - return null; - } - } - - @Override - protected TraceSpanReceiver getTraceSpanReceiver() { - return traceSpanReceiver; - } - - public void stop() { - if (executor == null) return; - try { - executor.shutdownNow(); - executor.awaitTermination(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOGGER.error("Failed to stop the thread. ", e); - } - } - - } - -} diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java deleted file mode 100644 index bf87c2b2b9f..00000000000 --- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.phoenix.trace; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.sql.Connection; -import java.util.Collection; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.apache.htrace.Span; -import org.apache.htrace.Tracer; -import org.apache.phoenix.end2end.ParallelStatsDisabledTest; -import org.apache.phoenix.query.QueryServicesOptions; -import org.apache.phoenix.trace.TraceReader.SpanInfo; -import org.apache.phoenix.trace.TraceReader.TraceHolder; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test that the logging sink stores the expected metrics/stats - */ -@Category(ParallelStatsDisabledTest.class) -public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT { - - /** - * IT should create the target table if it hasn't been created yet, but not fail if the table has - * already been created - * @throws Exception on failure - */ - @Test - public void testCreatesTable() throws Exception { - - Connection conn = getConnectionWithoutTracing(); - - // check for existence of the tracing table - try { - String ddl = "CREATE TABLE " + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME; - conn.createStatement().execute(ddl); - fail("Table " + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME - + " was not created by the metrics sink"); - } catch (Exception e) { - // expected - } - } - - /** - * Simple metrics writing and reading check, that uses the standard wrapping in the - * {@link TraceWriter} - * @throws Exception on failure - */ - @Test - public void writeMetrics() throws Exception { - - Connection conn = getConnectionWithoutTracing(); - latch = new CountDownLatch(1); - testTraceWriter.start(); - - // create a simple metrics record - long traceid = 987654; - String description = "Some generic trace"; - long spanid = 10; - long parentid = 11; - long startTime 
= 12; - long endTime = 13; - String processid = "Some process"; - String annotation = "test annotation for a span"; - - Span span = createNewSpan(traceid, parentid, spanid, description, startTime, endTime, processid, - annotation); - - Tracer.getInstance().deliver(span); - assertTrue("Span never committed to table", latch.await(30, TimeUnit.SECONDS)); - - // make sure we only get expected stat entry (matcing the trace id), otherwise we could the - // stats for the update as well - TraceReader reader = new TraceReader(conn, tracingTableName); - Collection traces = reader.readAll(10); - assertEquals("Wrong number of traces in the tracing table", 1, traces.size()); - - // validate trace - TraceHolder trace = traces.iterator().next(); - // we are just going to get an orphan span b/c we don't send in a parent - assertEquals("Didn't get expected orphaned spans!" + trace.orphans, 1, trace.orphans.size()); - - assertEquals(traceid, trace.traceid); - SpanInfo spanInfo = trace.orphans.get(0); - assertEquals(description, spanInfo.description); - assertEquals(parentid, spanInfo.getParentIdForTesting()); - assertEquals(startTime, spanInfo.start); - assertEquals(endTime, spanInfo.end); - assertEquals("Wrong number of tags", 0, spanInfo.tagCount); - assertEquals("Wrong number of annotations", 1, spanInfo.annotationCount); - } - -} diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java deleted file mode 100644 index 9c291a8f8ae..00000000000 --- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import org.apache.hadoop.metrics2.MetricsTag; - -/** - * Simple Tag implementation for testing - */ -public class PhoenixTagImpl extends MetricsTag { - public PhoenixTagImpl(String name, String description, String value) { - super(new MetricsInfoImpl(name, description), value); - } -} diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java deleted file mode 100644 index 142b1fcca41..00000000000 --- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.phoenix.trace; - -import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB; -import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES; -import static org.junit.Assert.*; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.*; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.htrace.*; -import org.apache.htrace.impl.ProbabilitySampler; -import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants; -import org.apache.phoenix.end2end.ParallelStatsDisabledTest; -import org.apache.phoenix.jdbc.PhoenixConnection; -import org.apache.phoenix.trace.TraceReader.SpanInfo; -import org.apache.phoenix.trace.TraceReader.TraceHolder; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableMap; - -/** - * Test that the logging sink stores the expected metrics/stats - */ -@Category(ParallelStatsDisabledTest.class) -@Ignore("Will need to revisit for new HDFS/HBase/HTrace, broken on 5.x") -public class PhoenixTracingEndToEndIT extends BaseTracingTestIT { - - private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixTracingEndToEndIT.class); - private static final int MAX_RETRIES = 10; - private String enabledForLoggingTable; - private String enableForLoggingIndex; - - @Before - public void setupMetrics() throws Exception { - enabledForLoggingTable = "ENABLED_FOR_LOGGING_" + generateUniqueName(); - enableForLoggingIndex = "ENABALED_FOR_LOGGING_INDEX_" + 
generateUniqueName(); - } - - /** - * Simple test that we can correctly write spans to the phoenix table - * @throws Exception on failure - */ - @Test - public void testWriteSpans() throws Exception { - - LOGGER.info("testWriteSpans TableName: " + tracingTableName); - // watch our sink so we know when commits happen - latch = new CountDownLatch(1); - - testTraceWriter.start(); - - // write some spans - TraceScope trace = Trace.startSpan("Start write test", Sampler.ALWAYS); - Span span = trace.getSpan(); - - // add a child with some annotations - Span child = span.child("child 1"); - child.addTimelineAnnotation("timeline annotation"); - TracingUtils.addAnnotation(child, "test annotation", 10); - child.stop(); - - // sleep a little bit to get some time difference - Thread.sleep(100); - - trace.close(); - - // pass the trace on - Tracer.getInstance().deliver(span); - - // wait for the tracer to actually do the write - assertTrue("Sink not flushed. commit() not called on the connection", - latch.await(60, TimeUnit.SECONDS)); - - // look for the writes to make sure they were made - Connection conn = getConnectionWithoutTracing(); - checkStoredTraces(conn, new TraceChecker() { - @Override - public boolean foundTrace(TraceHolder trace, SpanInfo info) { - if (info.description.equals("child 1")) { - assertEquals("Not all annotations present", 1, info.annotationCount); - assertEquals("Not all tags present", 1, info.tagCount); - boolean found = false; - for (String annotation : info.annotations) { - if (annotation.startsWith("test annotation")) { - found = true; - } - } - assertTrue("Missing the annotations in span: " + info, found); - found = false; - for (String tag : info.tags) { - if (tag.endsWith("timeline annotation")) { - found = true; - } - } - assertTrue("Missing the tags in span: " + info, found); - return true; - } - return false; - } - }); - } - - /** - * Test that span will actually go into the this sink and be written on both side of the wire, - * through the 
indexing code. - */ - @Test - public void testClientServerIndexingTracing() throws Exception { - - LOGGER.info("testClientServerIndexingTracing TableName: " + tracingTableName); - // one call for client side, one call for server side - latch = new CountDownLatch(2); - testTraceWriter.start(); - - // separate connection so we don't create extra traces - Connection conn = getConnectionWithoutTracing(); - createTestTable(conn, true); - - // trace the requests we send - Connection traceable = getTracingConnection(); - LOGGER.debug("Doing dummy the writes to the tracked table"); - String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)"; - PreparedStatement stmt = traceable.prepareStatement(insert); - stmt.setString(1, "key1"); - stmt.setLong(2, 1); - // this first trace just does a simple open/close of the span. Its not doing anything - // terribly interesting because we aren't auto-committing on the connection, so it just - // updates the mutation state and returns. - stmt.execute(); - stmt.setString(1, "key2"); - stmt.setLong(2, 2); - stmt.execute(); - traceable.commit(); - - // wait for the latch to countdown, as the metrics system is time-based - LOGGER.debug("Waiting for latch to complete!"); - latch.await(200, TimeUnit.SECONDS);// should be way more than GC pauses - - // read the traces back out - - /* - * Expected: 1. Single element trace - for first PreparedStatement#execute span 2. Two element - * trace for second PreparedStatement#execute span a. execute call b. metadata lookup* 3. Commit - * trace. a. Committing to tables i. Committing to single table ii. hbase batch write* i.I. span - * on server i.II. building index updates i.III. 
waiting for latch where '*' is a generically - * named thread (e.g phoenix-1-thread-X) - */ - boolean indexingCompleted = checkStoredTraces(conn, new TraceChecker() { - @Override - public boolean foundTrace(TraceHolder trace, SpanInfo span) { - String traceInfo = trace.toString(); - // skip logging traces that are just traces about tracing - if (traceInfo.contains(tracingTableName)) { - return false; - } - return traceInfo.contains("Completing index"); - } - }); - - assertTrue("Never found indexing updates", indexingCompleted); - } - - private void createTestTable(Connection conn, boolean withIndex) throws SQLException { - // create a dummy table - String ddl = "create table if not exists " + enabledForLoggingTable + "(" - + "k varchar not null, " + "c1 bigint" + " CONSTRAINT pk PRIMARY KEY (k))"; - conn.createStatement().execute(ddl); - - // early exit if we don't need to create an index - if (!withIndex) { - return; - } - // create an index on the table - we know indexing has some basic tracing - ddl = "CREATE INDEX IF NOT EXISTS " + enableForLoggingIndex + " on " + enabledForLoggingTable - + " (c1)"; - conn.createStatement().execute(ddl); - } - - @Test - public void testScanTracing() throws Exception { - - LOGGER.info("testScanTracing TableName: " + tracingTableName); - - // separate connections to minimize amount of traces that are generated - Connection traceable = getTracingConnection(); - Connection conn = getConnectionWithoutTracing(); - - // one call for client side, one call for server side - latch = new CountDownLatch(2); - testTraceWriter.start(); - - // create a dummy table - createTestTable(conn, false); - - // update the table, but don't trace these, to simplify the traces we read - LOGGER.debug("Doing dummy the writes to the tracked table"); - String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)"; - PreparedStatement stmt = conn.prepareStatement(insert); - stmt.setString(1, "key1"); - stmt.setLong(2, 1); - stmt.execute(); - 
conn.commit(); - conn.rollback(); - - // setup for next set of updates - stmt.setString(1, "key2"); - stmt.setLong(2, 2); - stmt.execute(); - conn.commit(); - conn.rollback(); - - // do a scan of the table - String read = "SELECT * FROM " + enabledForLoggingTable; - ResultSet results = traceable.createStatement().executeQuery(read); - assertTrue("Didn't get first result", results.next()); - assertTrue("Didn't get second result", results.next()); - results.close(); - - assertTrue("Get expected updates to trace table", latch.await(200, TimeUnit.SECONDS)); - // don't trace reads either - boolean tracingComplete = checkStoredTraces(conn, new TraceChecker() { - - @Override - public boolean foundTrace(TraceHolder currentTrace) { - String traceInfo = currentTrace.toString(); - return traceInfo.contains("Parallel scanner"); - } - }); - assertTrue("Didn't find the parallel scanner in the tracing", tracingComplete); - } - - @Test - public void testScanTracingOnServer() throws Exception { - - LOGGER.info("testScanTracingOnServer TableName: " + tracingTableName); - - // separate connections to minimize amount of traces that are generated - Connection traceable = getTracingConnection(); - Connection conn = getConnectionWithoutTracing(); - - // one call for client side, one call for server side - latch = new CountDownLatch(5); - testTraceWriter.start(); - - // create a dummy table - createTestTable(conn, false); - - // update the table, but don't trace these, to simplify the traces we read - LOGGER.debug("Doing dummy the writes to the tracked table"); - String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)"; - PreparedStatement stmt = conn.prepareStatement(insert); - stmt.setString(1, "key1"); - stmt.setLong(2, 1); - stmt.execute(); - conn.commit(); - - // setup for next set of updates - stmt.setString(1, "key2"); - stmt.setLong(2, 2); - stmt.execute(); - conn.commit(); - - // do a scan of the table - String read = "SELECT COUNT(*) FROM " + 
enabledForLoggingTable; - ResultSet results = traceable.createStatement().executeQuery(read); - assertTrue("Didn't get count result", results.next()); - // make sure we got the expected count - assertEquals("Didn't get the expected number of row", 2, results.getInt(1)); - results.close(); - - assertTrue("Didn't get expected updates to trace table", latch.await(60, TimeUnit.SECONDS)); - - // don't trace reads either - boolean found = checkStoredTraces(conn, new TraceChecker() { - @Override - public boolean foundTrace(TraceHolder trace) { - String traceInfo = trace.toString(); - return traceInfo.contains(BaseScannerRegionObserverConstants.SCANNER_OPENED_TRACE_INFO); - } - }); - assertTrue("Didn't find the parallel scanner in the tracing", found); - } - - @Test - public void testCustomAnnotationTracing() throws Exception { - - LOGGER.info("testCustomAnnotationTracing TableName: " + tracingTableName); - - final String customAnnotationKey = "myannot"; - final String customAnnotationValue = "a1"; - final String tenantId = "tenant1"; - // separate connections to minimize amount of traces that are generated - Connection traceable = - getTracingConnection(ImmutableMap.of(customAnnotationKey, customAnnotationValue), tenantId); - Connection conn = getConnectionWithoutTracing(); - - // one call for client side, one call for server side - latch = new CountDownLatch(2); - testTraceWriter.start(); - - // create a dummy table - createTestTable(conn, false); - - // update the table, but don't trace these, to simplify the traces we read - LOGGER.debug("Doing dummy the writes to the tracked table"); - String insert = "UPSERT INTO " + enabledForLoggingTable + " VALUES (?, ?)"; - PreparedStatement stmt = conn.prepareStatement(insert); - stmt.setString(1, "key1"); - stmt.setLong(2, 1); - stmt.execute(); - conn.commit(); - conn.rollback(); - - // setup for next set of updates - stmt.setString(1, "key2"); - stmt.setLong(2, 2); - stmt.execute(); - conn.commit(); - conn.rollback(); - - // 
do a scan of the table - String read = "SELECT * FROM " + enabledForLoggingTable; - ResultSet results = traceable.createStatement().executeQuery(read); - assertTrue("Didn't get first result", results.next()); - assertTrue("Didn't get second result", results.next()); - results.close(); - - assertTrue("Get expected updates to trace table", latch.await(200, TimeUnit.SECONDS)); - - assertAnnotationPresent(customAnnotationKey, customAnnotationValue, conn); - assertAnnotationPresent(TENANT_ID_ATTRIB, tenantId, conn); - // CurrentSCN is also added as an annotation. Not tested here because it screws up test setup. - } - - @Test - public void testTraceOnOrOff() throws Exception { - Connection conn1 = getConnectionWithoutTracing(); // DriverManager.getConnection(getUrl()); - try { - Statement statement = conn1.createStatement(); - ResultSet rs = statement.executeQuery("TRACE ON"); - assertTrue(rs.next()); - PhoenixConnection pconn = (PhoenixConnection) conn1; - long traceId = pconn.getTraceScope().getSpan().getTraceId(); - assertEquals(traceId, rs.getLong(1)); - assertEquals(traceId, rs.getLong("trace_id")); - assertFalse(rs.next()); - assertEquals(Sampler.ALWAYS, pconn.getSampler()); - - rs = statement.executeQuery("TRACE OFF"); - assertTrue(rs.next()); - assertEquals(traceId, rs.getLong(1)); - assertEquals(traceId, rs.getLong("trace_id")); - assertFalse(rs.next()); - assertEquals(Sampler.NEVER, pconn.getSampler()); - - rs = statement.executeQuery("TRACE OFF"); - assertFalse(rs.next()); - - rs = statement.executeQuery("TRACE ON WITH SAMPLING 0.5"); - rs.next(); - assertTrue(((PhoenixConnection) conn1).getSampler() instanceof ProbabilitySampler); - - rs = statement.executeQuery("TRACE ON WITH SAMPLING 1.0"); - assertTrue(rs.next()); - traceId = pconn.getTraceScope().getSpan().getTraceId(); - assertEquals(traceId, rs.getLong(1)); - assertEquals(traceId, rs.getLong("trace_id")); - assertFalse(rs.next()); - assertEquals(Sampler.ALWAYS, pconn.getSampler()); - - rs = 
statement.executeQuery("TRACE ON WITH SAMPLING 0.5"); - rs.next(); - assertTrue(((PhoenixConnection) conn1).getSampler() instanceof ProbabilitySampler); - - rs = statement.executeQuery("TRACE ON WITH SAMPLING 0.0"); - rs.next(); - assertEquals(Sampler.NEVER, pconn.getSampler()); - - rs = statement.executeQuery("TRACE OFF"); - assertFalse(rs.next()); - - } finally { - conn1.close(); - } - } - - @Test - public void testSingleSpan() throws Exception { - - LOGGER.info("testSingleSpan TableName: " + tracingTableName); - - Properties props = new Properties(TEST_PROPERTIES); - Connection conn = DriverManager.getConnection(getUrl(), props); - latch = new CountDownLatch(1); - testTraceWriter.start(); - - // create a simple metrics record - long traceid = 987654; - Span span = createNewSpan(traceid, Span.ROOT_SPAN_ID, 10, "root", 12, 13, "Some process", - "test annotation for a span"); - - Tracer.getInstance().deliver(span); - assertTrue("Updates not written in table", latch.await(60, TimeUnit.SECONDS)); - - // start a reader - validateTraces(Collections.singletonList(span), conn, traceid, tracingTableName); - } - - /** - * Test multiple spans, within the same trace. 
Some spans are independent of the parent span, some - * are child spans - * @throws Exception on failure - */ - @Test - public void testMultipleSpans() throws Exception { - - LOGGER.info("testMultipleSpans TableName: " + tracingTableName); - - Connection conn = getConnectionWithoutTracing(); - latch = new CountDownLatch(4); - testTraceWriter.start(); - - // create a simple metrics record - long traceid = 12345; - List spans = new ArrayList(); - - Span span = createNewSpan(traceid, Span.ROOT_SPAN_ID, 7777, "root", 10, 30, "root process", - "root-span tag"); - spans.add(span); - - // then create a child record - span = createNewSpan(traceid, 7777, 6666, "c1", 11, 15, "c1 process", "first child"); - spans.add(span); - - // create a different child - span = createNewSpan(traceid, 7777, 5555, "c2", 11, 18, "c2 process", "second child"); - spans.add(span); - - // create a child of the second child - span = createNewSpan(traceid, 5555, 4444, "c3", 12, 16, "c3 process", "third child"); - spans.add(span); - - for (Span span1 : spans) - Tracer.getInstance().deliver(span1); - - assertTrue("Updates not written in table", latch.await(100, TimeUnit.SECONDS)); - - // start a reader - validateTraces(spans, conn, traceid, tracingTableName); - } - - private void validateTraces(List spans, Connection conn, long traceid, String tableName) - throws Exception { - TraceReader reader = new TraceReader(conn, tableName); - Collection traces = reader.readAll(1); - assertEquals("Got an unexpected number of traces!", 1, traces.size()); - // make sure the trace matches what we wrote - TraceHolder trace = traces.iterator().next(); - assertEquals("Got an unexpected traceid", traceid, trace.traceid); - assertEquals("Got an unexpected number of spans", spans.size(), trace.spans.size()); - - validateTrace(spans, trace); - } - - /** - */ - private void validateTrace(List spans, TraceHolder trace) { - // drop each span into a sorted list so we get the expected ordering - Iterator spanIter = 
trace.spans.iterator(); - for (Span span : spans) { - SpanInfo spanInfo = spanIter.next(); - LOGGER.info("Checking span:\n" + spanInfo); - - long parentId = span.getParentId(); - if (parentId == Span.ROOT_SPAN_ID) { - assertNull("Got a parent, but it was a root span!", spanInfo.parent); - } else { - assertEquals("Got an unexpected parent span id", parentId, spanInfo.parent.id); - } - - assertEquals("Got an unexpected start time", span.getStartTimeMillis(), spanInfo.start); - assertEquals("Got an unexpected end time", span.getStopTimeMillis(), spanInfo.end); - - int annotationCount = 0; - for (Map.Entry entry : span.getKVAnnotations().entrySet()) { - int count = annotationCount++; - assertEquals("Didn't get expected annotation", - count + " - " + Bytes.toString(entry.getValue()), spanInfo.annotations.get(count)); - } - assertEquals("Didn't get expected number of annotations", annotationCount, - spanInfo.annotationCount); - } - } - - private void assertAnnotationPresent(final String annotationKey, final String annotationValue, - Connection conn) throws Exception { - boolean tracingComplete = checkStoredTraces(conn, new TraceChecker() { - @Override - public boolean foundTrace(TraceHolder currentTrace) { - return currentTrace.toString().contains(annotationKey + " - " + annotationValue); - } - }); - - assertTrue("Didn't find the custom annotation in the tracing", tracingComplete); - } - - private boolean checkStoredTraces(Connection conn, TraceChecker checker) throws Exception { - TraceReader reader = new TraceReader(conn, tracingTableName); - int retries = 0; - boolean found = false; - outer: while (retries < MAX_RETRIES) { - Collection traces = reader.readAll(100); - for (TraceHolder trace : traces) { - LOGGER.info("Got trace: " + trace); - found = checker.foundTrace(trace); - if (found) { - break outer; - } - for (SpanInfo span : trace.spans) { - found = checker.foundTrace(trace, span); - if (found) { - break outer; - } - } - } - LOGGER.info("====== Waiting for 
tracing updates to be propagated ========"); - Thread.sleep(1000); - retries++; - } - return found; - } - - private abstract class TraceChecker { - public boolean foundTrace(TraceHolder currentTrace) { - return false; - } - - public boolean foundTrace(TraceHolder currentTrace, SpanInfo currentSpan) { - return false; - } - } - -} diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java deleted file mode 100644 index 61e2c8b118b..00000000000 --- a/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.phoenix.trace; - -import org.apache.hadoop.metrics2.MetricsSink; -import org.apache.phoenix.metrics.Metrics; - -/** - * - */ -public class TracingTestUtil { - - public static void registerSink(MetricsSink sink, String name) { - Metrics.initialize().register(name, "test sink gets logged", sink); - } - - public static void unregisterSink(String name) { - Metrics.initialize().unregisterSource(name); - } -} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java deleted file mode 100644 index 981efbb5d96..00000000000 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.metrics2.impl; - -import org.apache.hadoop.metrics2.MetricsInfo; - -/** - * - */ -public class ExposedMetricCounterLong extends MetricCounterLong { - - /** - */ - public ExposedMetricCounterLong(MetricsInfo info, long value) { - super(info, value); - } -} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java deleted file mode 100644 index 26307a4f2c7..00000000000 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.metrics2.impl; - -import java.util.List; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsTag; - -/** - * Helper class to access the package-private {@link MetricsRecordImpl} - */ -@SuppressWarnings("javadoc") -public class ExposedMetricsRecordImpl extends MetricsRecordImpl { - - /** - */ - public ExposedMetricsRecordImpl(MetricsInfo info, long timestamp, List tags, - Iterable metrics) { - super(info, timestamp, tags, metrics); - } -} diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java deleted file mode 100644 index 43cafcd6d9e..00000000000 --- a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.metrics2.lib; - -/** - * Helper class to expose access to the {@link org.apache.hadoop.metrics2.lib.MetricsInfoImpl} - */ -public class ExposedMetricsInfoImpl extends MetricsInfoImpl { - - /** - */ - public ExposedMetricsInfoImpl(String name, String description) { - super(name, description); - } -} diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java index ee9b3db0ec7..890f634aef8 100644 --- a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java +++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java @@ -21,13 +21,11 @@ import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.MetricsSink; -import org.apache.phoenix.trace.TracingUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Simple sink that just logs the output of all the metrics that start with - * {@link org.apache.phoenix.trace.TracingUtils#METRIC_SOURCE_KEY} + * Simple sink that just logs the output of all the metrics that start with "phoenix." 
*/ public class LoggingSink implements MetricsSink { @@ -48,7 +46,7 @@ public void putMetrics(MetricsRecord record) { LOGGER.debug("Found record:" + record.name()); for (AbstractMetric metric : record.metrics()) { // just print the metric we care about - if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) { + if (metric.name().startsWith("phoenix.")) { LOGGER.debug("\t metric:" + metric); } } diff --git a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java deleted file mode 100644 index b81b5fd7f59..00000000000 --- a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceSpanReceiverTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.phoenix.trace; - -import static org.junit.Assert.assertTrue; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.htrace.Span; -import org.apache.htrace.Trace; -import org.apache.htrace.Tracer; -import org.apache.htrace.impl.MilliSpan; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * Test that the @{link TraceSpanReceiver} correctly handles different kinds of traces - */ -public class TraceSpanReceiverTest { - - @BeforeClass - public static synchronized void setup() throws Exception { - } - - /** - * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers, but HBase - * writes some strings as well, so we need to be able to handle that too - */ - @Test - public void testNonIntegerAnnotations() { - Span span = getSpan(); - // make sure its less than the length of an integer - - byte[] value = Bytes.toBytes("a"); - byte[] someInt = Bytes.toBytes(1); - assertTrue(someInt.length > value.length); - - // an annotation that is not an integer - span.addKVAnnotation(Bytes.toBytes("key"), value); - - // Create the sink and write the span - TraceSpanReceiver source = new TraceSpanReceiver(); - Trace.addReceiver(source); - - Tracer.getInstance().deliver(span); - - assertTrue(source.getNumSpans() == 1); - } - - @Test - public void testIntegerAnnotations() { - Span span = getSpan(); - - // add annotation through the phoenix interfaces - TracingUtils.addAnnotation(span, "message", 10); - - TraceSpanReceiver source = new TraceSpanReceiver(); - Trace.addReceiver(source); - - Tracer.getInstance().deliver(span); - assertTrue(source.getNumSpans() == 1); - } - - private Span getSpan() { - // Spans with Trace Id as 0 will be rejected (See PHOENIX-3767 for details) - return new MilliSpan("test span", 1, 1, 2, "pid"); - } -} diff --git a/phoenix-mapreduce-byo-shaded-hbase/pom.xml b/phoenix-mapreduce-byo-shaded-hbase/pom.xml index 477cd063451..14bd632287b 100644 --- 
a/phoenix-mapreduce-byo-shaded-hbase/pom.xml +++ b/phoenix-mapreduce-byo-shaded-hbase/pom.xml @@ -396,8 +396,6 @@ ${shaded.package}.org. org/apache/hadoop/** - - org/apache/htrace/** org/slf4j/** org/apache/commons/logging/** org/apache/log4j/** diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml index 05e068f108e..6f7404b8e0a 100644 --- a/phoenix-server/pom.xml +++ b/phoenix-server/pom.xml @@ -398,8 +398,6 @@ ${shaded.package}.org. org/apache/hadoop/** - - org/apache/htrace/** org/slf4j/** org/apache/commons/logging/** org/apache/log4j/** @@ -455,6 +453,10 @@ io/skip/checksum/errors io/sort/* io/serializations + + io/opentelemetry/** diff --git a/pom.xml b/pom.xml index 3c27ab4f914..640cc029f35 100644 --- a/pom.xml +++ b/pom.xml @@ -116,7 +116,6 @@ 2.0.1 0.5 1.16.0 - 3.1.0-incubating 2.10.5 2.1.31 1.1.3 @@ -223,6 +222,9 @@ ${os.detected.classifier} 3.0.1-b08 + + 1.34.1 + 1.23.1-alpha org.apache.phoenix.shaded org.apache.hadoop.hbase.shaded @@ -1285,11 +1287,6 @@ jcl-over-slf4j ${slf4j.version} - - org.apache.htrace - htrace-core - ${htrace.version} - commons-codec commons-codec @@ -1455,6 +1452,21 @@ javax.el ${glassfish.el.version} + + + + io.opentelemetry + opentelemetry-bom + ${opentelemetry.version} + pom + import + + + + io.opentelemetry.semconv + opentelemetry-semconv + ${opentelemetry-semconv.version} + @@ -1775,6 +1787,13 @@ com.sun.istack.** + + true + HTrace is removed, use OpenTelemetry via PhoenixTracing + + org.apache.htrace.** + +