HBASE-18601: Update HTrace to 4.2
author Tamas Penzes <tamaas@cloudera.com>
Thu, 26 Oct 2017 21:23:42 +0000 (23:23 +0200)
committer Michael Stack <stack@apache.org>
Sat, 11 Nov 2017 18:34:03 +0000 (10:34 -0800)
Updated HTrace version to 4.2.
Created TraceUtil class to wrap HTrace methods; uses try-with-resources.

Signed-off-by: Balazs Meszaros <balazs.meszaros@cloudera.com>
Signed-off-by: Michael Stack <stack@apache.org>
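
The pattern this change introduces, as a minimal sketch (the surrounding
method is hypothetical; createTrace returns null when tracing is disabled,
which try-with-resources tolerates, so call sites need no explicit close or
null check):

    import org.apache.hadoop.hbase.trace.TraceUtil;
    import org.apache.htrace.core.TraceScope;

    void doTracedWork() {
      // Opens a scope on the shared Tracer, or yields null when no Tracer
      // is configured; close() runs automatically only for a non-null scope.
      try (TraceScope scope = TraceUtil.createTrace("doTracedWork")) {
        // ... work to be traced ...
      }
    }
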
65 files changed:
hbase-backup/pom.xml
hbase-client/pom.xml
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
hbase-common/pom.xml
hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java [new file with mode: 0644]
hbase-endpoint/pom.xml
hbase-examples/pom.xml
hbase-external-blockcache/pom.xml
hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
hbase-hadoop2-compat/pom.xml
hbase-it/pom.xml
hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
hbase-mapreduce/pom.xml
hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
hbase-procedure/pom.xml
hbase-protocol-shaded/pom.xml
hbase-replication/pom.xml
hbase-rest/pom.xml
hbase-rsgroup/pom.xml
hbase-server/pom.xml
hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java [new file with mode: 0644]
hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
hbase-shell/pom.xml
hbase-shell/src/main/ruby/shell/commands/trace.rb
hbase-spark/pom.xml
hbase-testing-util/pom.xml
hbase-thrift/pom.xml
pom.xml
src/main/asciidoc/_chapters/tracing.adoc

diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
index 147f17f..6282471 100644 (file)
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>net.java.dev.jets3t</groupId>
               <artifactId>jets3t</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 675e813..cc112d4 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.jruby.jcodings</groupId>
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>net.java.dev.jets3t</groupId>
               <artifactId>jets3t</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index 4df1768..91225a7 100644 (file)
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -56,7 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Tracer;
 
 /**
  * The context, and return value, for a single submit/submitAll call.
@@ -582,7 +583,13 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       asyncProcess.incTaskCounters(multiAction.getRegions(), server);
       SingleServerRequestRunnable runnable = createSingleServerRequest(
               multiAction, numAttempt, server, callsInProgress);
-      return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", runnable));
+      Tracer tracer = Tracer.curThreadTracer();
+
+      if (tracer == null) {
+        return Collections.singletonList(runnable);
+      } else {
+        return Collections.singletonList(tracer.wrap(runnable, "AsyncProcess.sendMultiAction"));
+      }
     }
 
     // group the actions by the amount of delay
@@ -618,7 +625,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
           asyncProcess.connection.getConnectionMetrics().incrNormalRunners();
         }
       }
-      runnable = Trace.wrap(traceText, runnable);
+      runnable = TraceUtil.wrap(runnable, traceText);
       toReturn.add(runnable);
 
     }
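
The inline Tracer.curThreadTracer() guard above mirrors what the new
TraceUtil.wrap helper (added in hbase-common below) centralizes against the
shared tracer:

    public static Runnable wrap(Runnable runnable, String description) {
      // No-op when tracing is disabled: hand back the original runnable.
      return (tracer == null) ? runnable : tracer.wrap(runnable, description);
    }
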
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
index ccfe6ba..b05ad64 100644 (file)
@@ -28,9 +28,9 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
 
 /**
  * A completion service for the RpcRetryingCallerFactory.
@@ -168,7 +168,7 @@ public class ResultBoundedCompletionService<V> {
 
   public void submit(RetryingCallable<V> task, int callTimeout, int id) {
     QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id);
-    executor.execute(Trace.wrap(newFuture));
+    executor.execute(TraceUtil.wrap(newFuture, "ResultBoundedCompletionService.submit"));
     tasks[id] = newFuture;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
index 0524336..fcc6f7c 100644 (file)
@@ -24,10 +24,6 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.isFatalConnectionException;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.write;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -55,10 +51,15 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
+import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
@@ -66,17 +67,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHea
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Thread that reads responses and notifies callers. Each connection owns a socket connected to a
@@ -574,7 +573,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable {
   }
 
   private void tracedWriteRequest(Call call) throws IOException {
-    try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", call.span)) {
+    try (TraceScope ignored = TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest",
+          call.span)) {
       writeRequest(call);
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
index 5c0689a..72f03f9 100644 (file)
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MetricsConnection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
 
 /** A call waiting for a value. */
 @InterfaceAudience.Private
@@ -73,7 +73,7 @@ class Call {
     this.timeout = timeout;
     this.priority = priority;
     this.callback = callback;
-    this.span = Trace.currentSpan();
+    this.span = Tracer.getCurrentSpan();
   }
 
   @Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
index 7c0ddf0..8e3e9aa 100644 (file)
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.ipc.RemoteException;
@@ -102,10 +101,11 @@ class IPCUtil {
   static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) {
     RequestHeader.Builder builder = RequestHeader.newBuilder();
     builder.setCallId(call.id);
-    if (call.span != null) {
+    //TODO handle htrace API change, see HBASE-18895
+    /*if (call.span != null) {
       builder.setTraceInfo(RPCTInfo.newBuilder().setParentId(call.span.getSpanId())
-          .setTraceId(call.span.getTraceId()));
-    }
+          .setTraceId(call.span.getTracerId()));
+    }*/
     builder.setMethodName(call.md.getName());
     builder.setRequestParam(call.param != null);
     if (cellBlockMeta != null) {
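
One possible shape for restoring the commented-out block once HBASE-18895
settles the wire format, purely as a hypothetical sketch: htrace 4 span ids
are 128-bit values exposed via SpanId.getHigh()/getLow(), which the existing
pair of int64 RPCTInfo fields cannot carry unchanged.

    // Hypothetical: assumes RPCTInfo is extended to hold both halves of the
    // 128-bit htrace 4 span id.
    if (call.span != null) {
      SpanId id = call.span.getSpanId();
      builder.setTraceInfo(RPCTInfo.newBuilder()
          .setTraceId(id.getHigh())
          .setParentId(id.getLow()));
    }
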
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index 94377c0..04f709f 100644 (file)
@@ -33,8 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.CreateMode;
@@ -156,11 +157,8 @@ public class RecoverableZooKeeper {
    * This function will not throw NoNodeException if the path does not
    * exist.
    */
-  public void delete(String path, int version)
-  throws InterruptedException, KeeperException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.delete");
+  public void delete(String path, int version) throws InterruptedException, KeeperException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       boolean isRetry = false; // False for first attempt, true for all retries.
       while (true) {
@@ -197,8 +195,6 @@ public class RecoverableZooKeeper {
         retryCounter.sleepUntilNextRetry();
         isRetry = true;
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -206,11 +202,8 @@ public class RecoverableZooKeeper {
    * exists is an idempotent operation. Retry before throwing exception
    * @return A Stat instance
    */
-  public Stat exists(String path, Watcher watcher)
-  throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+  public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -236,8 +229,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -245,11 +236,8 @@ public class RecoverableZooKeeper {
    * exists is an idempotent operation. Retry before throwing exception
    * @return A Stat instance
    */
-  public Stat exists(String path, boolean watch)
-  throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+  public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -275,8 +263,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -297,9 +283,7 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, Watcher watcher)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -325,8 +309,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -336,9 +318,7 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, boolean watch)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -364,8 +344,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -375,9 +353,7 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, Watcher watcher, Stat stat)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -403,8 +379,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -414,9 +388,7 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, boolean watch, Stat stat)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -442,8 +414,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -455,9 +425,7 @@ public class RecoverableZooKeeper {
    */
   public Stat setData(String path, byte[] data, int version)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.setData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       byte[] newData = appendMetaData(id, data);
       boolean isRetry = false;
@@ -505,8 +473,6 @@ public class RecoverableZooKeeper {
         retryCounter.sleepUntilNextRetry();
         isRetry = true;
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -516,9 +482,7 @@ public class RecoverableZooKeeper {
    */
   public List<ACL> getAcl(String path, Stat stat)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getAcl")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -544,8 +508,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -555,9 +517,7 @@ public class RecoverableZooKeeper {
    */
   public Stat setAcl(String path, List<ACL> acls, int version)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.setAcl");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setAcl")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -583,8 +543,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -606,9 +564,7 @@ public class RecoverableZooKeeper {
   public String create(String path, byte[] data, List<ACL> acl,
       CreateMode createMode)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.create");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.create")) {
       byte[] newData = appendMetaData(id, data);
       switch (createMode) {
         case EPHEMERAL:
@@ -623,8 +579,6 @@ public class RecoverableZooKeeper {
           throw new IllegalArgumentException("Unrecognized CreateMode: " +
               createMode);
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -753,9 +707,7 @@ public class RecoverableZooKeeper {
    */
   public List<OpResult> multi(Iterable<Op> ops)
   throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.multi");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.multi")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       Iterable<Op> multiOps = prepareZKMulti(ops);
       while (true) {
@@ -782,8 +734,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
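
Every RecoverableZooKeeper operation above now follows the same shape; a
condensed sketch of the pattern (retry and error handling trimmed, and the
real methods also strip this class's metadata prefix from returned data):

    public byte[] getData(String path, Watcher watcher, Stat stat)
        throws KeeperException, InterruptedException {
      try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
          try {
            return zk.getData(path, watcher, stat);
          } catch (KeeperException e) {
            // non-retriable codes are rethrown here in the real method
            retryCounter.sleepUntilNextRetry();
          }
        }
      }
    }
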
 
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index b732bbe..8c5d40c 100644 (file)
     <!-- tracing Dependencies -->
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
           <artifactId>hadoop-common</artifactId>
           <!--FYI This pulls in hadoop's guava. Its needed for Configuration
                at least-->
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
       <build>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
       <build>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
index 55e53e3..b31a4f6 100644 (file)
 
 package org.apache.hadoop.hbase.trace;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.HTraceConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class HBaseHTraceConfiguration extends HTraceConfiguration {
-  private static final Log LOG =
-    LogFactory.getLog(HBaseHTraceConfiguration.class);
+  private static final Log LOG = LogFactory.getLog(HBaseHTraceConfiguration.class);
 
   public static final String KEY_PREFIX = "hbase.htrace.";
 
@@ -65,7 +64,7 @@ public class HBaseHTraceConfiguration extends HTraceConfiguration {
 
   @Override
   public String get(String key) {
-    return conf.get(KEY_PREFIX +key);
+    return conf.get(KEY_PREFIX + key);
   }
 
   @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index cb65f09..93a5fff 100644 (file)
@@ -24,10 +24,8 @@ import java.util.HashSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.SpanReceiver;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.SpanReceiverBuilder;
-import org.apache.htrace.Trace;
 
 /**
  * This class provides functions for reading the names of SpanReceivers from
@@ -62,6 +60,16 @@ public class SpanReceiverHost {
 
   }
 
+  public static Configuration getConfiguration(){
+    synchronized (SingletonHolder.INSTANCE.lock) {
+      if (SingletonHolder.INSTANCE.host == null || SingletonHolder.INSTANCE.host.conf == null) {
+        return null;
+      }
+
+      return SingletonHolder.INSTANCE.host.conf;
+    }
+  }
+
   SpanReceiverHost(Configuration conf) {
     receivers = new HashSet<>();
     this.conf = conf;
@@ -78,18 +86,18 @@ public class SpanReceiverHost {
       return;
     }
 
-    SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf));
+    SpanReceiver.Builder builder = new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf));
     for (String className : receiverNames) {
       className = className.trim();
 
-      SpanReceiver receiver = builder.spanReceiverClass(className).build();
+      SpanReceiver receiver = builder.className(className).build();
       if (receiver != null) {
         receivers.add(receiver);
         LOG.info("SpanReceiver " + className + " was loaded successfully.");
       }
     }
     for (SpanReceiver rcvr : receivers) {
-      Trace.addReceiver(rcvr);
+      TraceUtil.addReceiver(rcvr);
     }
   }
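
For reference, the htrace 4 receiver loading above end to end (the receiver
class name is illustrative):

    SpanReceiver.Builder builder =
        new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf));
    // build() returns null when the named class cannot be instantiated
    SpanReceiver receiver =
        builder.className("org.apache.htrace.core.LocalFileSpanReceiver").build();
    if (receiver != null) {
      TraceUtil.addReceiver(receiver);
    }
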
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
new file mode 100644 (file)
index 0000000..d52c67d
--- /dev/null
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * This wrapper class provides functions for accessing htrace 4+ functionality in a simplified way.
+ */
+public final class TraceUtil {
+  private static HTraceConfiguration conf;
+  private static Tracer tracer;
+
+  private TraceUtil() {
+  }
+
+  public static void initTracer(Configuration c) {
+    if(c != null) {
+      conf = new HBaseHTraceConfiguration(c);
+    }
+
+    if (tracer == null && conf != null) {
+      tracer = new Tracer.Builder("Tracer").conf(conf).build();
+    }
+  }
+
+  /**
+   * Wrapper method to create new TraceScope with the given description
+   * @return TraceScope or null when not tracing
+   */
+  public static TraceScope createTrace(String description) {
+    return (tracer == null) ? null : tracer.newScope(description);
+  }
+
+  /**
+   * Wrapper method to create new child TraceScope with the given description
+   * and the parent span's spanId
+   * @param span parent span
+   * @return TraceScope or null when not tracing
+   */
+  public static TraceScope createTrace(String description, Span span) {
+    if(span == null) return createTrace(description);
+
+    return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
+  }
+
+  /**
+   * Wrapper method to add new sampler to the default tracer
+   * @return true if added, false if it was already added
+   */
+  public static boolean addSampler(Sampler sampler) {
+    if (sampler == null) {
+      return false;
+    }
+
+    return (tracer == null) ? false : tracer.addSampler(sampler);
+  }
+
+  /**
+   * Wrapper method to add a key-value annotation to the current span
+   */
+  public static void addKVAnnotation(String key, String value){
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addKVAnnotation(key, value);
+    }
+  }
+
+  /**
+   * Wrapper method to add a receiver to the tracer pool
+   * @return true if successful, false if it was already added
+   */
+  public static boolean addReceiver(SpanReceiver rcvr) {
+    return (tracer == null) ? false : tracer.getTracerPool().addReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to remove a receiver from the tracer pool
+   * @return true if removed, false if it doesn't exist
+   */
+  public static boolean removeReceiver(SpanReceiver rcvr) {
+    return (tracer == null) ? false : tracer.getTracerPool().removeReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to add a timeline annotation to the current span with the given message
+   */
+  public static void addTimelineAnnotation(String msg) {
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addTimelineAnnotation(msg);
+    }
+  }
+
+  /**
+   * Wrap runnable with current tracer and description
+   * @param runnable to wrap
+   * @return wrapped runnable or original runnable when not tracing
+   */
+  public static Runnable wrap(Runnable runnable, String description) {
+    return (tracer == null) ? runnable : tracer.wrap(runnable, description);
+  }
+}
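
Typical use of the new class, as a sketch (the span name and annotation are
illustrative):

    TraceUtil.initTracer(conf);   // e.g. once at process startup
    try (TraceScope scope = TraceUtil.createTrace("HRegion.put")) {
      TraceUtil.addTimelineAnnotation("row lock acquired");
      // ... the traced operation ...
    }
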
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 101bfdb..2a135c2 100644 (file)
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index e706283..49f71e3 100644 (file)
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
       <build>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
       <build>
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
index 53708d8..845e8f3 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index d759367..c05499c 100644 (file)
@@ -1,3 +1,4 @@
+
 /**
  * Copyright The Apache Software Foundation
  *
 
 package org.apache.hadoop.hbase.io.hfile;
 
-import net.spy.memcached.CachedData;
-import net.spy.memcached.ConnectionFactoryBuilder;
-import net.spy.memcached.FailureMode;
-import net.spy.memcached.MemcachedClient;
-import net.spy.memcached.transcoders.Transcoder;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.concurrent.ExecutionException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.concurrent.ExecutionException;
+import net.spy.memcached.CachedData;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.transcoders.Transcoder;
 
 /**
  * Class to store blocks into memcached.
@@ -134,7 +135,7 @@ public class MemcachedBlockCache implements BlockCache {
     // Assume that nothing is the block cache
     HFileBlock result = null;
 
-    try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
+    try (TraceScope traceScope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) {
       result = client.get(cacheKey.toString(), tc);
     } catch (Exception e) {
       // Catch a pretty broad set of exceptions to limit any changes in the memecache client
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index c314aca..1a13979 100644 (file)
@@ -170,6 +170,12 @@ limitations under the License.
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop-two.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 42c8da7..0ee29e5 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>javax.ws.rs</groupId>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
            <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 71e0d0b..503d4c1 100644 (file)
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.chaos.actions.Action;
 import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction;
 import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
@@ -62,20 +61,19 @@ import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
 import org.apache.hadoop.hbase.ipc.FatalConnectionException;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.LoadTestTool;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.AlwaysSampler;
+import org.apache.htrace.core.AlwaysSampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
-
 /**
  * Integration test that should benchmark how fast HBase can recover from failures. This test starts
  * different threads:
@@ -268,7 +266,7 @@ public class IntegrationTestMTTR {
 
     loadTool = null;
   }
-  
+
   private static boolean tablesOnMaster() {
     boolean ret = true;
     String value = util.getConfiguration().get("hbase.balancer.tablesOnMaster");
@@ -369,7 +367,7 @@ public class IntegrationTestMTTR {
    */
   private static class TimingResult {
     DescriptiveStatistics stats = new DescriptiveStatistics();
-    ArrayList<Long> traces = new ArrayList<>(10);
+    ArrayList<String> traces = new ArrayList<>(10);
 
     /**
      * Add a result to this aggregate result.
@@ -377,9 +375,12 @@ public class IntegrationTestMTTR {
      * @param span Span.  To be kept if the time taken was over 1 second
      */
     public void addResult(long time, Span span) {
+      if (span == null) {
+        return;
+      }
       stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS));
       if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) {
-        traces.add(span.getTraceId());
+        traces.add(span.getTracerId());
       }
     }
 
@@ -419,12 +420,15 @@ public class IntegrationTestMTTR {
       final int maxIterations = 10;
       int numAfterDone = 0;
       int resetCount = 0;
+      TraceUtil.addSampler(AlwaysSampler.INSTANCE);
       // Keep trying until the rs is back up and we've gotten a put through
       while (numAfterDone < maxIterations) {
         long start = System.nanoTime();
-        TraceScope scope = null;
-        try {
-          scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
+        Span span = null;
+        try (TraceScope scope = TraceUtil.createTrace(getSpanName())) {
+          if (scope != null) {
+            span = scope.getSpan();
+          }
           boolean actionResult = doAction();
           if (actionResult && future.isDone()) {
             numAfterDone++;
@@ -470,12 +474,8 @@ public class IntegrationTestMTTR {
             LOG.info("Too many unexpected Exceptions. Aborting.", e);
             throw e;
           }
-        } finally {
-          if (scope != null) {
-            scope.close();
-          }
         }
-        result.addResult(System.nanoTime() - start, scope.getSpan());
+        result.addResult(System.nanoTime() - start, span);
       }
       return result;
     }
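
Because the scope variable is confined to the try-with-resources block, the
span handle is taken while the scope is open so addResult can use it after
the block, as above:

    Span span = null;
    try (TraceScope scope = TraceUtil.createTrace(getSpanName())) {
      if (scope != null) {
        span = scope.getSpan();   // 'scope' is inaccessible after the try
      }
      // ... the timed action ...
    }
    result.addResult(System.nanoTime() - start, span);
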
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index 327d879..780c461 100644 (file)
@@ -35,9 +35,8 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -117,13 +116,12 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 
       for (int i = 0; i < 100; i++) {
         Runnable runnable = new Runnable() {
-          private TraceScope innerScope = null;
           private final LinkedBlockingQueue<Long> rowKeyQueue = rks;
           @Override
           public void run() {
             ResultScanner rs = null;
-            try {
-              innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
+            TraceUtil.addSampler(Sampler.ALWAYS);
+            try (TraceScope scope = TraceUtil.createTrace("Scan")){
               Table ht = util.getConnection().getTable(tableName);
               Scan s = new Scan();
               s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
@@ -137,20 +135,15 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
                 accum |= Bytes.toLong(r.getRow());
               }
 
-              innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum);
+              TraceUtil.addTimelineAnnotation("Accum result = " + accum);
 
               ht.close();
               ht = null;
             } catch (IOException e) {
               e.printStackTrace();
-
-              innerScope.getSpan().addKVAnnotation(
-                  Bytes.toBytes("exception"),
-                  Bytes.toBytes(e.getClass().getSimpleName()));
-
+              TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName());
             } catch (Exception e) {
             } finally {
-              if (innerScope != null) innerScope.close();
               if (rs != null) rs.close();
             }
 
@@ -165,7 +158,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
       throws IOException {
     for (int i = 0; i < 100; i++) {
       Runnable runnable = new Runnable() {
-        private TraceScope innerScope = null;
         private final LinkedBlockingQueue<Long> rowKeyQueue = rowKeys;
 
         @Override
@@ -180,9 +172,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
           }
 
           long accum = 0;
+          TraceUtil.addSampler(Sampler.ALWAYS);
           for (int x = 0; x < 5; x++) {
-            try {
-              innerScope = Trace.startSpan("gets", Sampler.ALWAYS);
+            try (TraceScope scope = TraceUtil.createTrace("gets")) {
               long rk = rowKeyQueue.take();
               Result r1 = ht.get(new Get(Bytes.toBytes(rk)));
               if (r1 != null) {
@@ -192,14 +184,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
               if (r2 != null) {
                 accum |= Bytes.toLong(r2.getRow());
               }
-              innerScope.getSpan().addTimelineAnnotation("Accum = " + accum);
+              TraceUtil.addTimelineAnnotation("Accum = " + accum);
 
-            } catch (IOException e) {
+            } catch (IOException|InterruptedException ie) {
               // IGNORED
-            } catch (InterruptedException ie) {
-              // IGNORED
-            } finally {
-              if (innerScope != null) innerScope.close();
             }
           }
 
@@ -210,25 +198,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
   }
 
   private void createTable() throws IOException {
-    TraceScope createScope = null;
-    try {
-      createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
+    TraceUtil.addSampler(Sampler.ALWAYS);
+    try (TraceScope scope = TraceUtil.createTrace("createTable")) {
       util.createTable(tableName, familyName);
-    } finally {
-      if (createScope != null) createScope.close();
     }
   }
 
   private void deleteTable() throws IOException {
-    TraceScope deleteScope = null;
-
-    try {
+    TraceUtil.addSampler(Sampler.ALWAYS);
+    try (TraceScope scope = TraceUtil.createTrace("deleteTable")) {
       if (admin.tableExists(tableName)) {
-        deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
         util.deleteTable(tableName);
       }
-    } finally {
-      if (deleteScope != null) deleteScope.close();
     }
   }
 
@@ -236,9 +217,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
     LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
     BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
     byte[] value = new byte[300];
+    TraceUtil.addSampler(Sampler.ALWAYS);
     for (int x = 0; x < 5000; x++) {
-      TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
-      try {
+      try (TraceScope traceScope = TraceUtil.createTrace("insertData")) {
         for (int i = 0; i < 5; i++) {
           long rk = random.nextLong();
           rowKeys.add(rk);
@@ -252,8 +233,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
         if ((x % 1000) == 0) {
           admin.flush(tableName);
         }
-      } finally {
-        traceScope.close();
       }
     }
     admin.flush(tableName);
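
htrace 4 attaches samplers to the Tracer instead of taking one per call, so
each traced block above is preceded by an addSampler call; condensed:

    TraceUtil.addSampler(Sampler.ALWAYS);  // was Trace.startSpan(name, Sampler.ALWAYS)
    try (TraceScope scope = TraceUtil.createTrace("insertData")) {
      // the tracer's registered samplers now make the sampling decision
    }
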
diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml
index 42a50bc..883cda2 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <scope>test</scope>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
         </exclusion>
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>net.java.dev.jets3t</groupId>
               <artifactId>jets3t</artifactId>
             </exclusion>
           <artifactId>hadoop-hdfs</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>javax.servlet.jsp</groupId>
               <artifactId>jsp-api</artifactId>
             </exclusion>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
           <scope>test</scope>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
 
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <!--maven dependency:analyze says not needed but tests fail w/o-->
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 40e2cb9..31d33f2 100644 (file)
@@ -818,7 +818,7 @@ public class TableMapReduceUtil {
       com.google.protobuf.Message.class,
       org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.class,
       org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
-      org.apache.htrace.Trace.class,
+      org.apache.htrace.core.Tracer.class,
       com.codahale.metrics.MetricRegistry.class,
       org.apache.commons.lang3.ArrayUtils.class,
       com.fasterxml.jackson.databind.ObjectMapper.class,
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index bc36cde..2917605 100644 (file)
@@ -31,10 +31,10 @@ import java.util.Date;
 import java.util.LinkedList;
 import java.util.Locale;
 import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.Queue;
 import java.util.Random;
 import java.util.TreeMap;
-import java.util.NoSuchElementException;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -48,7 +48,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.AsyncConnection;
@@ -81,9 +80,17 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
+import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.ByteArrayHashKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Hash;
+import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.YammerHistogramUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -93,17 +100,15 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
-import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.UniformReservoir;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
 
 /**
  * Script used evaluating HBase performance and scalability.  Runs a HBase
@@ -1034,7 +1039,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     protected final TestOptions opts;
 
     private final Status status;
-    private final Sampler<?> traceSampler;
+    private final Sampler traceSampler;
     private final SpanReceiverHost receiverHost;
 
     private String testName;
@@ -1182,17 +1187,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
     void testTimed() throws IOException, InterruptedException {
       int startRow = getStartRow();
       int lastRow = getLastRow();
+      TraceUtil.addSampler(traceSampler);
       // Report on completion of 1/10th of total.
       for (int ii = 0; ii < opts.cycles; ii++) {
         if (opts.cycles > 1) LOG.info("Cycle=" + ii + " of " + opts.cycles);
         for (int i = startRow; i < lastRow; i++) {
           if (i % everyN != 0) continue;
           long startTime = System.nanoTime();
-          TraceScope scope = Trace.startSpan("test row", traceSampler);
-          try {
+          try (TraceScope scope = TraceUtil.createTrace("test row")) {
             testRow(i);
-          } finally {
-            scope.close();
           }
           if ( (i - startRow) > opts.measureAfter) {
             // If multiget is enabled, say set to 10, testRow() returns immediately first 9 times
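
The TraceUtil entry points used above (addSampler, createTrace) come from the wrapper class this change introduces in hbase-common. The following is a reconstruction of its likely shape from the call sites in this patch, not a quotation of the new file; method bodies are a sketch. One htrace-core4 Tracer backs the static helpers, and every helper is null-safe so call sites keep working when tracing is disabled:

    package org.apache.hadoop.hbase.trace;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.htrace.core.HTraceConfiguration;
    import org.apache.htrace.core.Sampler;
    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public final class TraceUtil {
      private static HTraceConfiguration conf;
      private static Tracer tracer;

      private TraceUtil() {
      }

      public static void initTracer(Configuration c) {
        if (c != null) {
          conf = new HBaseHTraceConfiguration(c);
        }
        if (tracer == null && conf != null) {
          tracer = new Tracer.Builder("Tracer").conf(conf).build();
        }
      }

      // Returns null when no Tracer is installed, which is why callers
      // elsewhere in this patch null-check the scope before using it.
      public static TraceScope createTrace(String description) {
        return (tracer == null) ? null : tracer.newScope(description);
      }

      public static TraceScope createTrace(String description, Span span) {
        if (span == null) {
          return createTrace(description);
        }
        return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
      }

      public static void addSampler(Sampler sampler) {
        if (tracer != null && sampler != null) {
          tracer.addSampler(sampler);
        }
      }

      // Replaces the htrace-3 idiom "if (Trace.isTracing()) span.addTimelineAnnotation(...)".
      public static void addTimelineAnnotation(String msg) {
        Span span = Tracer.getCurrentSpan();
        if (span != null) {
          span.addTimelineAnnotation(msg);
        }
      }
    }
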
index 764457a..bb9ce84 100644 (file)
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
index 4f52bba..1676691 100644 (file)
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+    </dependency>
   </dependencies>
   <profiles>
     <!-- Skip the tests in this module -->
index a56a470..942fd8c 100644 (file)
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>net.java.dev.jets3t</groupId>
               <artifactId>jets3t</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
index 78855df..bc2eb93 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
     </dependency>
     <dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
index ee75ef9..9b0bfe7 100644 (file)
           <artifactId>hadoop-common</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>net.java.dev.jets3t</groupId>
               <artifactId>jets3t</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
     </profile>
index 1a4689e..1e5a1f3 100644 (file)
     <!-- tracing Dependencies -->
     <dependency>
       <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
       <artifactId>htrace-core</artifactId>
+      <version>${htrace-hadoop.version}</version>
     </dependency>
     <dependency>
       <groupId>com.lmax</groupId>
index cfe3d61..1056c20 100644 (file)
@@ -23,11 +23,12 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Abstract base class for all HBase event handlers. Subclasses should
@@ -74,7 +75,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
    * Default base class constructor.
    */
   public EventHandler(Server server, EventType eventType) {
-    this.parent = Trace.currentSpan();
+    this.parent = Tracer.getCurrentSpan();
     this.server = server;
     this.eventType = eventType;
     seqid = seqids.incrementAndGet();
@@ -99,13 +100,10 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
 
   @Override
   public void run() {
-    TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
-    try {
+    try (TraceScope scope = TraceUtil.createTrace(this.getClass().getSimpleName(), parent)) {
       process();
     } catch(Throwable t) {
       handleException(t);
-    } finally {
-      chunk.close();
     }
   }
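
EventHandler shows the cross-thread pattern the new API uses for parent spans: the constructor captures the submitting thread's current span, and run() re-parents the processing scope on the worker thread. A minimal self-contained sketch of that hand-off (class and span names here are hypothetical):

    import org.apache.hadoop.hbase.trace.TraceUtil;
    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public class HandoffSketch {
      public Runnable traced(Runnable work) {
        // Captured on the submitting thread; null when nothing is being traced.
        final Span parent = Tracer.getCurrentSpan();
        return () -> {
          // A null resource is legal in try-with-resources: close() is skipped.
          try (TraceScope scope = TraceUtil.createTrace("HandoffSketch.work", parent)) {
            work.run();
          }
        };
      }
    }
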
 
index 9e29023..f216f42 100644 (file)
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.SizeCachedKeyValue;
 import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@@ -59,8 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.hadoop.hbase.util.ObjectIntPair;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -255,6 +255,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     // Prefetch file blocks upon open if requested
     if (cacheConf.shouldPrefetchOnOpen()) {
       PrefetchExecutor.request(path, new Runnable() {
+        @Override
         public void run() {
           long offset = 0;
           long end = 0;
@@ -436,6 +437,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * @return the total heap size of data and meta block indexes in bytes. Does
    *         not take into account non-root blocks of a multilevel data index.
    */
+  @Override
   public long indexSize() {
     return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
         + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
@@ -1239,6 +1241,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     }
   }
 
+  @Override
   public Path getPath() {
     return path;
   }
@@ -1276,10 +1279,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
   protected boolean decodeMemstoreTS = false;
 
 
+  @Override
   public boolean isDecodeMemStoreTS() {
     return this.decodeMemstoreTS;
   }
 
+  @Override
   public boolean shouldIncludeMemStoreTS() {
     return includesMemstoreTS;
   }
@@ -1437,8 +1442,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 
     boolean useLock = false;
     IdLock.Entry lockEntry = null;
-    TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock");
-    try {
+    try (TraceScope traceScope = TraceUtil.createTrace("HFileReaderImpl.readBlock")) {
       while (true) {
         // Check cache for block. If found return.
         if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
@@ -1453,9 +1457,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
             if (LOG.isTraceEnabled()) {
               LOG.trace("From Cache " + cachedBlock);
             }
-            if (Trace.isTracing()) {
-              traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
-            }
+            TraceUtil.addTimelineAnnotation("blockCacheHit");
             assert cachedBlock.isUnpacked() : "Packed block leak.";
             if (cachedBlock.getBlockType().isData()) {
               if (updateCacheMetrics) {
@@ -1481,9 +1483,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
           // Carry on, please load.
         }
 
-        if (Trace.isTracing()) {
-          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
-        }
+        TraceUtil.addTimelineAnnotation("blockCacheMiss");
         // Load block from filesystem.
         HFileBlock hfileBlock =
             fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
@@ -1505,7 +1505,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
         return unpacked;
       }
     } finally {
-      traceScope.close();
       if (lockEntry != null) {
         offsetLock.releaseLockEntry(lockEntry);
       }
@@ -1568,6 +1567,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     close(cacheConf.shouldEvictOnClose());
   }
 
+  @Override
   public void close(boolean evictOnClose) throws IOException {
     PrefetchExecutor.cancel(path);
     if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
@@ -1580,11 +1580,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     fsBlockReader.closeStreams();
   }
 
+  @Override
   public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
     return dataBlockEncoder.getEffectiveEncodingInCache(isCompaction);
   }
 
   /** For testing */
+  @Override
   public HFileBlock.FSReader getUncachedBlockReader() {
     return fsBlockReader;
   }
@@ -1612,6 +1614,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       return curBlock != null;
     }
 
+    @Override
     public void setNonSeekedState() {
       reset();
     }
@@ -1713,6 +1716,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       }
     }
 
+    @Override
     protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
       return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock));
     }
@@ -1730,6 +1734,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
       return seeker.seekToKeyInBlock(key, seekBefore);
     }
 
+    @Override
     public int compareKey(CellComparator comparator, Cell key) {
       return seeker.compareKey(comparator, key);
     }
@@ -1776,6 +1781,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * Returns false if block prefetching was requested for this file and has
    * not completed, true otherwise
    */
+  @Override
   @VisibleForTesting
   public boolean prefetchComplete() {
     return PrefetchExecutor.isCompleted(path);
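
The readBlock() hunk shows the recurring shape of this migration: the explicit Trace.isTracing() guard from htrace 3 moves inside the wrapper, so annotation call sites collapse to one line. Side by side:

    // htrace 3 (removed): guard and span lookup at every call site.
    if (Trace.isTracing()) {
      traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
    }

    // htrace 4 via TraceUtil (this patch): the current-span null check lives
    // in the wrapper, so disabled tracing costs a single static call.
    TraceUtil.addTimelineAnnotation("blockCacheHit");
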
index d4fc706..141674d 100644 (file)
@@ -24,6 +24,7 @@ import java.util.Optional;
 import org.apache.hadoop.hbase.CallDroppedException;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -32,8 +33,6 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 
 /**
  * The request processing logic, which is usually executed in thread pools provided by an
@@ -116,20 +115,17 @@ public class CallRunner {
       String error = null;
       Pair<Message, CellScanner> resultPair = null;
       RpcServer.CurCall.set(call);
-      TraceScope traceScope = null;
       try {
         if (!this.rpcServer.isStarted()) {
           InetSocketAddress address = rpcServer.getListenerAddress();
           throw new ServerNotRunningYetException("Server " +
               (address != null ? address : "(channel closed)") + " is not running yet");
         }
-        if (call.getTraceInfo() != null) {
-          String serviceName =
-              call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
-          String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
-          String traceString = serviceName + "." + methodName;
-          traceScope = Trace.startSpan(traceString, call.getTraceInfo());
-        }
+        String serviceName =
+            call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
+        String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
+        String traceString = serviceName + "." + methodName;
+        TraceUtil.createTrace(traceString);
         // make the call
         resultPair = this.rpcServer.call(call, this.status);
       } catch (TimeoutIOException e){
@@ -150,9 +146,6 @@ public class CallRunner {
           throw (Error)e;
         }
       } finally {
-        if (traceScope != null) {
-          traceScope.close();
-        }
         RpcServer.CurCall.set(null);
         if (resultPair != null) {
           this.rpcServer.addCallSize(call.getSize() * -1);
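
Note that the rewritten CallRunner block calls TraceUtil.createTrace(traceString) without keeping the returned TraceScope, so the per-call scope is never explicitly closed. A try-with-resources form that the new wrapper supports, shown as a sketch over the hunk's own locals rather than as the committed code:

    // Sketch only; the committed hunk discards the returned scope instead.
    String traceString = serviceName + "." + methodName;
    try (TraceScope traceScope = TraceUtil.createTrace(traceString)) {
      // make the call under the per-RPC span
      resultPair = this.rpcServer.call(call, this.status);
    }
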
index 7fd4736..f86fa77 100644 (file)
@@ -181,7 +181,7 @@ public class NettyRpcServer extends RpcServer {
       Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
       long startTime, int timeout) throws IOException {
     NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null,
-        -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
+        -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
     return call(fakeCall, status);
   }
 }
index 7dfdc72..70b9da3 100644 (file)
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -40,9 +39,9 @@ class NettyServerCall extends ServerCall<NettyServerRpcConnection> {
 
   NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
       Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size,
-      TraceInfo tinfo, InetAddress remoteAddress, long receiveTime, int timeout,
+      InetAddress remoteAddress, long receiveTime, int timeout,
       ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
-    super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+    super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
         receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
   }
 
index 21c7f51..a91aafb 100644 (file)
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * RpcConnection implementation for netty rpc server.
@@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection {
   @Override
   public NettyServerCall createCall(int id, final BlockingService service,
       final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner,
-      long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
+      long size, final InetAddress remoteAddress, int timeout,
       CallCleanup reqCleanup) {
-    return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+    return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
         remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
         this.rpcServer.cellBlockBuilder, reqCleanup);
   }
index 3562d86..51b1684 100644 (file)
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Interface of all necessary to carry out a RPC method invocation on the server.
@@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext {
    * @return A short string format of this call without possibly lengthy params
    */
   String toShortString();
-
-  /**
-   * @return TraceInfo attached to this call.
-   */
-  TraceInfo getTraceInfo();
 }
index 60fe30e..2fca7f1 100644 (file)
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries
@@ -79,7 +78,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
 
   protected final long size;                          // size of current call
   protected boolean isError;
-  protected final TraceInfo tinfo;
   protected ByteBufferListOutputStream cellBlockStream = null;
   protected CallCleanup reqCleanup = null;
 
@@ -96,7 +94,7 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
       justification="Can't figure why this complaint is happening... see below")
   ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
-      Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo,
+      Message param, CellScanner cellScanner, T connection, long size,
       InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
       CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
     this.id = id;
@@ -110,7 +108,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
     this.response = null;
     this.isError = false;
     this.size = size;
-    this.tinfo = tinfo;
     if (connection != null) {
       this.user =  connection.user;
       this.retryImmediatelySupported = connection.retryImmediatelySupported;
@@ -507,11 +504,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
   }
 
   @Override
-  public TraceInfo getTraceInfo() {
-    return tinfo;
-  }
-
-  @Override
   public synchronized BufferChain getResponse() {
     return response;
   }
index e1ac741..4d0239f 100644 (file)
@@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.htrace.TraceInfo;
 
 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
@@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable {
     if ((totalRequestSize +
         this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
       final ServerCall<?> callTooBig = createCall(id, this.service, null, null, null, null,
-        totalRequestSize, null, null, 0, this.callCleanup);
+        totalRequestSize, null, 0, this.callCleanup);
       this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
       callTooBig.setResponse(null, null,  RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
         "Call queue is full on " + this.rpcServer.server.getServerName() +
@@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable {
       }
 
       ServerCall<?> readParamsFailedCall = createCall(id, this.service, null, null, null, null,
-        totalRequestSize, null, null, 0, this.callCleanup);
+        totalRequestSize, null, 0, this.callCleanup);
       readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage());
       readParamsFailedCall.sendResponseIfReady();
       return;
     }
 
-    TraceInfo traceInfo = header.hasTraceInfo() ? new TraceInfo(header
-        .getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
-        : null;
     int timeout = 0;
     if (header.hasTimeout() && header.getTimeout() > 0) {
       timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
     }
     ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize,
-      traceInfo, this.addr, timeout, this.callCleanup);
+      this.addr, timeout, this.callCleanup);
 
     if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
       this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
@@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable {
   public abstract boolean isConnectionOpen();
 
   public abstract ServerCall<?> createCall(int id, BlockingService service, MethodDescriptor md,
-      RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+      RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup);
 
   private static class ByteBuffByteInput extends ByteInput {
index 69cc48d..36ae74a 100644 (file)
@@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer {
       Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
       long startTime, int timeout) throws IOException {
     SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
-        null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
+        null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
     return call(fakeCall, status);
   }
 
index 5a26c05..46295fd 100644 (file)
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall<SimpleServerRpcConnection> {
       justification = "Can't figure why this complaint is happening... see below")
   SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md,
       RequestHeader header, Message param, CellScanner cellScanner,
-      SimpleServerRpcConnection connection, long size, TraceInfo tinfo,
+      SimpleServerRpcConnection connection, long size,
       final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
       CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) {
-    super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+    super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
         receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
     this.responder = responder;
   }
index 68545f3..c8dfe4a 100644 (file)
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.htrace.TraceInfo;
 
 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT",
@@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
           // Notify the client about the offending request
           SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
-              null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0,
+              null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
               this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder);
           this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
           // Make sure the client recognizes the underlying exception
@@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
   @Override
   public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md,
-      RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+      RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
-    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
         remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
         this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
   }
index 91c5218..cad77e5 100644 (file)
@@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -470,6 +471,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public HMaster(final Configuration conf)
       throws IOException, KeeperException {
     super(conf);
+    TraceUtil.initTracer(conf);
     try {
       this.rsFatals = new MemoryBoundedLogMessageBuffer(
           conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
index f9a441d..093412a 100644 (file)
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -147,6 +148,8 @@ public class HMasterCommandLine extends ServerCommandLine {
 
   private int startMaster() {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
+
     try {
       // If 'local', defer to LocalHBaseCluster instance.  Starts master
       // and regionserver both in the one JVM.
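
TraceUtil.initTracer(conf) has to run before the first span is created, which is why both command-line entry points and the HMaster/HRegionServer constructors call it. A minimal bootstrap sketch, assuming the hbase.htrace. key prefix that HBaseHTraceConfiguration strips before delegating to htrace-core4 (the receiver and sampler values are illustrative, not defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.trace.TraceUtil;

    public class TracerBootstrapSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "hbase.htrace." is stripped; htrace-core4 sees span.receiver.classes etc.
        conf.set("hbase.htrace.span.receiver.classes",
            "org.apache.htrace.core.LocalFileSpanReceiver");
        conf.set("hbase.htrace.sampler.classes", "AlwaysSampler");
        TraceUtil.initTracer(conf); // must precede any createTrace() call
      }
    }
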
index 492325e..a265a55 100644 (file)
@@ -91,11 +91,11 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
@@ -149,8 +149,31 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.shaded.com.google.common.io.Closeables;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -172,33 +195,9 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.shaded.com.google.common.io.Closeables;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
-
 import edu.umd.cs.findbugs.annotations.Nullable;
 
 /**
@@ -3733,6 +3732,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
   }
 
+  @Override
   public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
     return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
@@ -5566,16 +5566,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     RowLockContext rowLockContext = null;
     RowLockImpl result = null;
-    TraceScope traceScope = null;
-
-    // If we're tracing start a span to show how long this took.
-    if (Trace.isTracing()) {
-      traceScope = Trace.startSpan("HRegion.getRowLock");
-      traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
-    }
 
     boolean success = false;
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
+      TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
       // Keep trying until we have a lock or error out.
       // TODO: do we need to add a time component here?
       while (result == null) {
@@ -5604,9 +5598,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       }
 
       if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
-        if (traceScope != null) {
-          traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
-        }
+        TraceUtil.addTimelineAnnotation("Failed to get row lock");
         result = null;
         String message = "Timed out waiting for lock for row: " + rowKey + " in region "
             + getRegionInfo().getEncodedName();
@@ -5624,9 +5616,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
       InterruptedIOException iie = new InterruptedIOException();
       iie.initCause(ie);
-      if (traceScope != null) {
-        traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
-      }
+      TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock");
       Thread.currentThread().interrupt();
       throw iie;
     } finally {
@@ -5634,9 +5624,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       if (!success && rowLockContext != null) {
         rowLockContext.cleanUp();
       }
-      if (traceScope != null) {
-        traceScope.close();
-      }
     }
   }
 
index 6ad595f..4c34fe0 100644 (file)
@@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -526,6 +527,7 @@ public class HRegionServer extends HasThread implements
   // Defer till after we register with the Master as much as possible. See #startServices.
   public HRegionServer(Configuration conf) throws IOException {
     super("RegionServer");  // thread name
+    TraceUtil.initTracer(conf);
     try {
       this.startcode = System.currentTimeMillis();
       this.conf = conf;
index 1212668..c2e1111 100644 (file)
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +51,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
 
   private int start() throws Exception {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
     try {
       // If 'local', don't start a region server here. Defer to
       // LocalHBaseCluster. It manages 'local' clusters.
index ae4c8eb..a314848 100644 (file)
@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -51,12 +53,9 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-
 /**
  * Thread that flushes cache on request
  *
@@ -447,7 +446,7 @@ class MemStoreFlusher implements FlushRequester {
             "store files; delaying flush up to " + this.blockingWaitTime + "ms");
           if (!this.server.compactSplitThread.requestSplit(region)) {
             try {
-              this.server.compactSplitThread.requestSystemCompaction((HRegion) region,
+              this.server.compactSplitThread.requestSystemCompaction(region,
                 Thread.currentThread().getName());
             } catch (IOException e) {
               e = e instanceof RemoteException ?
@@ -572,12 +571,10 @@ class MemStoreFlusher implements FlushRequester {
    * amount of memstore consumption.
    */
   public void reclaimMemStoreMemory() {
-    TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
+    TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory");
     FlushType flushType = isAboveHighWaterMark();
     if (flushType != FlushType.NORMAL) {
-      if (Trace.isTracing()) {
-        scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
-      }
+      TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
       long start = EnvironmentEdgeManager.currentTime();
       synchronized (this.blockSignal) {
         boolean blocked = false;
@@ -640,7 +637,9 @@ class MemStoreFlusher implements FlushRequester {
     } else if (isAboveLowWaterMark() != FlushType.NORMAL) {
       wakeupFlushThread();
     }
-    scope.close();
+    if (scope != null) {
+      scope.close();
+    }
   }
 
   private void logMsg(String string1, long val, long max) {
index ad54cab..f7fbd86 100644 (file)
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -72,14 +74,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 import com.lmax.disruptor.RingBuffer;
 
 /**
@@ -681,8 +679,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
    * @throws IOException if there is a problem flushing or closing the underlying FS
    */
   Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
-    TraceScope scope = Trace.startSpan("FSHFile.replaceWriter");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) {
       long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter);
       int oldNumEntries = this.numEntries.getAndSet(0);
       final String newPathString = (null == newPath ? null : CommonFSUtils.getPath(newPath));
@@ -696,16 +693,16 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
         LOG.info("New WAL " + newPathString);
       }
       return newPath;
-    } finally {
-      scope.close();
     }
   }
 
   protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
     // Now we have published the ringbuffer, halt the current thread until we get an answer back.
     try {
-      syncFuture.get(walSyncTimeoutNs);
-      return syncFuture.getSpan();
+      if (syncFuture != null) {
+        syncFuture.get(walSyncTimeoutNs);
+      }
+      return (syncFuture == null) ? null : syncFuture.getSpan();
     } catch (TimeoutIOException tioe) {
       // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
       // still refer to it, so if this thread use it next time may get a wrong
@@ -748,8 +745,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
         LOG.debug("WAL closing. Skipping rolling of writer");
         return regionsToFlush;
       }
-      TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
-      try {
+      try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) {
         Path oldPath = getOldPath();
         Path newPath = getNewPath();
         // Any exception from here on is catastrophic, non-recoverable so we currently abort.
@@ -774,8 +770,6 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
             "for details.", exception);
       } finally {
         closeBarrier.endOp();
-        assert scope == NullScope.INSTANCE || !scope.isDetached();
-        scope.close();
       }
       return regionsToFlush;
     } finally {
@@ -950,7 +944,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (timeInNanos > this.slowSyncNs) {
       String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000)
           .append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString();
-      Trace.addTimelineAnnotation(msg);
+      TraceUtil.addTimelineAnnotation(msg);
       LOG.info(msg);
     }
     if (!listeners.isEmpty()) {
@@ -966,16 +960,20 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
     }
-    TraceScope scope = Trace.startSpan(implClassName + ".append");
     MutableLong txidHolder = new MutableLong();
     MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
       txidHolder.setValue(ringBuffer.next());
     });
     long txid = txidHolder.longValue();
-    try {
+    try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
       FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
       entry.stampRegionSequenceId(we);
-      ringBuffer.get(txid).load(entry, scope.detach());
+      if (scope != null) {
+        ringBuffer.get(txid).load(entry, scope.getSpan());
+      }
+      else {
+        ringBuffer.get(txid).load(entry, null);
+      }
     } finally {
       ringBuffer.publish(txid);
     }
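
htrace 4 removed the detach()/continueSpan() pair, so the append path above hands the raw Span across the ring buffer and leaves consumer-side re-attachment to the HBASE-18895 follow-up. The hand-off condenses to the following (a sketch over the hunk's locals):

    // Pass the Span, or null when no scope was created; there is no detach()
    // in htrace 4, hence the TODOs in the consumer threads.
    Span span = (scope == null) ? null : scope.getSpan();
    ringBuffer.get(txid).load(entry, span);
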
index cff3f70..d4e113a 100644 (file)
@@ -44,6 +44,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
@@ -52,18 +54,14 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
-
 import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
 import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 
 import com.lmax.disruptor.RingBuffer;
 import com.lmax.disruptor.Sequence;
@@ -342,9 +340,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   }
 
   private void addTimeAnnotation(SyncFuture future, String annotation) {
-    TraceScope scope = Trace.continueSpan(future.getSpan());
-    Trace.addTimelineAnnotation(annotation);
-    future.setSpan(scope.detach());
+    TraceUtil.addTimelineAnnotation(annotation);
+    //TODO handle htrace API change, see HBASE-18895
+    //future.setSpan(scope.getSpan());
   }
 
   private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
@@ -415,14 +413,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       Span span = entry.detachSpan();
       // the span maybe null if this is a retry after rolling.
       if (span != null) {
-        TraceScope scope = Trace.continueSpan(span);
+        //TODO handle htrace API change, see HBASE-18895
+        //TraceScope scope = Trace.continueSpan(span);
         try {
           appended = append(writer, entry);
         } catch (IOException e) {
           throw new AssertionError("should not happen", e);
         } finally {
-          assert scope == NullScope.INSTANCE || !scope.isDetached();
-          scope.close(); // append scope is complete
+          //TODO handle htrace API change, see HBASE-18895
+          //assert scope == NullScope.INSTANCE || !scope.isDetached();
+          //scope.close(); // append scope is complete
         }
       } else {
         try {
@@ -559,24 +559,26 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")){
       long txid = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(txid);
-        truck.load(future);
+        if (scope != null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(txid);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(txid);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
 
@@ -585,25 +587,27 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     if (highestSyncedTxid.get() >= txid) {
       return;
     }
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
       // here we do not use ring buffer sequence as txid
       long sequence = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(sequence);
-        truck.load(future);
+        if (scope != null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(sequence);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(sequence);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
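
Both sync() overloads now follow the same publish-then-block shape: claim a ring-buffer slot, load a SyncFuture only when a scope exists (createTrace() can return null when no tracer is installed), always publish the claimed slot, and wait outside the critical section. Condensed, with names taken from the hunk above:

    long sequence = waitingConsumePayloads.next(); // claim a slot
    SyncFuture future = null;
    try {
      if (scope != null) {
        future = getSyncFuture(txid, scope.getSpan());
        waitingConsumePayloads.get(sequence).load(future);
      }
    } finally {
      waitingConsumePayloads.publish(sequence);    // always release the slot
    }
    if (future != null) {
      blockOnSync(future);                         // block outside the publish path
    }
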
 
index cc9601b..c4e23da 100644 (file)
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -54,10 +55,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           // use assert to make sure no change breaks the logic that
           // sequence and zigzagLatch will be set together
           assert sequence > 0L : "Failed to get sequence from ring buffer";
-          Trace.addTimelineAnnotation("awaiting safepoint");
+          TraceUtil.addTimelineAnnotation("awaiting safepoint");
           syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence));
         }
       } catch (FailedSyncBeforeLogCloseException e) {
@@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       if (this.writer != null) {
         oldFileLen = this.writer.getLength();
         try {
-          Trace.addTimelineAnnotation("closing writer");
+          TraceUtil.addTimelineAnnotation("closing writer");
           this.writer.close();
-          Trace.addTimelineAnnotation("writer closed");
+          TraceUtil.addTimelineAnnotation("writer closed");
           this.closeErrorCount.set(0);
         } catch (IOException ioe) {
           int errors = closeErrorCount.incrementAndGet();
@@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           }
           // I got something. Lets run. Save off current sequence number in case it changes
           // while we run.
-          TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
+          //TODO handle htrace API change, see HBASE-18895
+          //TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
           long start = System.nanoTime();
           Throwable lastException = null;
           try {
-            Trace.addTimelineAnnotation("syncing writer");
+            TraceUtil.addTimelineAnnotation("syncing writer");
             writer.sync();
-            Trace.addTimelineAnnotation("writer synced");
+            TraceUtil.addTimelineAnnotation("writer synced");
             currentSequence = updateHighestSyncedSequence(currentSequence);
           } catch (IOException e) {
             LOG.error("Error syncing, request close of WAL", e);
@@ -611,7 +611,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
             lastException = e;
           } finally {
             // reattach the span to the future before releasing.
-            takeSyncFuture.setSpan(scope.detach());
+            //TODO handle htrace API change, see HBASE-18895
+            // takeSyncFuture.setSpan(scope.getSpan());
             // First release what we 'took' from the queue.
             syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException);
             // Can we release other syncs?
@@ -727,8 +728,15 @@ public class FSHLog extends AbstractFSWAL<Writer> {
   }
 
   // Sync all known transactions
-  private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
-    return blockOnSync(publishSyncOnRingBuffer(span));
+  private void publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException {
+    if (scope != null) {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(scope.getSpan());
+      blockOnSync(syncFuture);
+    }
+    else {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(null);
+      blockOnSync(syncFuture);
+    }
   }
 
   /**
@@ -754,12 +762,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -769,12 +773,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       // Already sync'd.
       return;
     }
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -996,7 +996,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           }
         } else if (truck.type() == RingBufferTruck.Type.APPEND) {
           FSWALEntry entry = truck.unloadAppend();
-          TraceScope scope = Trace.continueSpan(entry.detachSpan());
+          //TODO handle htrace API change, see HBASE-18895
+          //TraceScope scope = Trace.continueSpan(entry.detachSpan());
           try {
 
             if (this.exception != null) {
@@ -1015,9 +1016,6 @@ public class FSHLog extends AbstractFSWAL<Writer> {
                     : new DamagedWALException("On sync", this.exception));
             // Return to keep processing events coming off the ringbuffer
             return;
-          } finally {
-            assert scope == NullScope.INSTANCE || !scope.isDetached();
-            scope.close(); // append scope is complete
           }
         } else {
           // What is this if not an append or sync. Fail all up to this!!!
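For reference, the TraceUtil wrapper that the FSHLog call sites above compile against is added elsewhere in this commit (under hbase-common) and is not shown in this excerpt. A minimal sketch consistent with those call sites follows: null-safe static wrappers around one shared HTrace 4 Tracer, so that try (TraceScope scope = TraceUtil.createTrace(...)) stays legal even when tracing is disabled (try-with-resources accepts a null resource). Only the method signatures are taken from the call sites; the field layout and the "Tracer" builder name are assumptions.

package org.apache.hadoop.hbase.trace;

import org.apache.hadoop.conf.Configuration;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanReceiver;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public final class TraceUtil {
  private static Tracer tracer;

  private TraceUtil() {
  }

  public static void initTracer(Configuration c) {
    if (tracer == null && c != null) {
      tracer = new Tracer.Builder("Tracer")
          .conf(new HBaseHTraceConfiguration(c)).build();
    }
  }

  // Returns null when no Tracer is configured; callers hand the result
  // straight to try-with-resources.
  public static TraceScope createTrace(String description) {
    return (tracer == null) ? null : tracer.newScope(description);
  }

  public static void addTimelineAnnotation(String msg) {
    Span span = Tracer.getCurrentSpan();
    if (span != null) {
      span.addTimelineAnnotation(msg);
    }
  }

  public static void addSampler(Sampler sampler) {
    if (tracer != null) {
      tracer.addSampler(sampler);
    }
  }

  public static void addReceiver(SpanReceiver rcvr) {
    if (tracer != null) {
      tracer.getTracerPool().addReceiver(rcvr);
    }
  }

  public static void removeReceiver(SpanReceiver rcvr) {
    if (tracer != null) {
      tracer.getTracerPool().removeReceiver(rcvr);
    }
  }

  public static Runnable wrap(Runnable runnable, String description) {
    return (tracer == null) ? runnable : tracer.wrap(runnable, description);
  }
}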
index 03ef008..debe9e4 100644 (file)
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
index a63b281..021f6a1 100644 (file)
@@ -18,8 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.Span;
 
 /**
  * A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a
index 13d103b..0dbd020 100644 (file)
@@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 
 /**
  * A Future on a filesystem sync call. It given to a client or 'Handler' for it to wait on till the
index 0a1c60f..3b3d568 100644 (file)
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
@@ -657,6 +658,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
         setLevel(org.apache.log4j.Level.ERROR);
 
+    TraceUtil.initTracer(conf);
 
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
       true, null, null, hosts, null);
@@ -1125,6 +1127,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
 
     Configuration c = new Configuration(this.conf);
+    TraceUtil.initTracer(c);
     this.hbaseCluster =
         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
     // Don't leave here till we've done a successful scan of the hbase:meta
index bb91770..8521e65 100644 (file)
@@ -563,7 +563,7 @@ public class TestSimpleRpcScheduler {
     ServerCall putCall = new ServerCall(1, null, null,
         RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(),
         RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))),
-        null, null, 9, null, null, timestamp, 0, null, null, null) {
+        null, null, 9, null, timestamp, 0, null, null, null) {
 
       @Override
       public void sendResponseIfReady() throws IOException {
index 63bcbdf..33f77ef 100644 (file)
 package org.apache.hadoop.hbase.trace;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.lang.reflect.Method;
-import java.util.Collection;
-
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.TraceTree;
-import org.apache.htrace.impl.POJOSpanReceiver;
+import org.apache.htrace.core.POJOSpanReceiver;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -44,103 +40,84 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+
 @Category({MiscTests.class, MediumTests.class})
 public class TestHTraceHooks {
 
   private static final byte[] FAMILY_BYTES = "family".getBytes();
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static POJOSpanReceiver rcvr;
-  private static long ROOT_SPAN_ID = 0;
+  private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
 
   @Rule
   public TestName name = new TestName();
 
   @BeforeClass
   public static void before() throws Exception {
-
-    // Find out what the right value to use fo SPAN_ROOT_ID after HTRACE-111. We use HTRACE-32
-    // to find out to detect if we are using HTrace 3.2 or not.
-    try {
-        Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
-    } catch (NoSuchMethodException e) {
-      ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
-    }
-
     TEST_UTIL.startMiniCluster(2, 3);
     rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
-    Trace.addReceiver(rcvr);
+    TraceUtil.addReceiver(rcvr);
+    TraceUtil.addSampler(new Sampler() {
+      @Override
+      public boolean next() {
+        return true;
+      }
+    });
   }
 
   @AfterClass
   public static void after() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
-    Trace.removeReceiver(rcvr);
+    TraceUtil.removeReceiver(rcvr);
     rcvr = null;
   }
 
   @Test
   public void testTraceCreateTable() throws Exception {
-    TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
     Table table;
-    try {
-
+    Span createTableSpan;
+    try (TraceScope scope = TraceUtil.createTrace("creating table")) {
+      createTableSpan = scope.getSpan();
       table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
-    } finally {
-      tableCreationSpan.close();
     }
 
     // Some table creation is async.  Need to make sure that everything is full in before
     // checking to see if the spans are there.
-    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return rcvr.getSpans().size() >= 5;
+    TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+      @Override public boolean evaluate() throws Exception {
+        return rcvr == null || rcvr.getSpans().size() >= 5;
       }
     });
 
-    Collection<Span> spans = rcvr.getSpans();
+    Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
+    List<Span> roots = new LinkedList<>();
     TraceTree traceTree = new TraceTree(spans);
-    Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
-
-    assertEquals(1, roots.size());
-    Span createTableRoot = roots.iterator().next();
+    roots.addAll(traceTree.getSpansByParent().find(createTableSpan.getSpanId()));
 
-    assertEquals("creating table", createTableRoot.getDescription());
+    assertEquals(3, roots.size());
+    assertEquals("creating table", createTableSpan.getDescription());
 
-    int createTableCount = 0;
-
-    for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
-      if (s.getDescription().startsWith("MasterService.CreateTable")) {
-        createTableCount++;
-      }
+    if (spans != null) {
+      assertTrue(spans.size() > 5);
     }
 
-    assertTrue(createTableCount >= 1);
-    assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
-    assertTrue(spans.size() > 5);
-    
     Put put = new Put("row".getBytes());
     put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
 
-    TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
-    try {
+    Span putSpan;
+
+    try (TraceScope scope = TraceUtil.createTrace("doing put")) {
+      putSpan = scope.getSpan();
       table.put(put);
-    } finally {
-      putSpan.close();
     }
 
     spans = rcvr.getSpans();
     traceTree = new TraceTree(spans);
-    roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
-
-    assertEquals(2, roots.size());
-    Span putRoot = null;
-    for (Span root : roots) {
-      if (root.getDescription().equals("doing put")) {
-        putRoot = root;
-      }
-    }
-    
-    assertNotNull(putRoot);
+    roots.clear();
+    roots.addAll(traceTree.getSpansByParent().find(putSpan.getSpanId()));
+    assertEquals(1, roots.size());
   }
 }
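The rewritten test captures the root span from the scope it opens, instead of searching for a fixed SPAN_ROOT_ID. The underlying HTrace 4 idiom, sketched standalone with all names illustrative rather than taken from the commit:

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.POJOSpanReceiver;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopeDemo {
  public static void main(String[] args) {
    Tracer tracer = new Tracer.Builder("scope-demo")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();
    POJOSpanReceiver rcvr = new POJOSpanReceiver(HTraceConfiguration.EMPTY);
    tracer.getTracerPool().addReceiver(rcvr);

    SpanId rootId;
    try (TraceScope scope = tracer.newScope("demo operation")) {
      // Grab the span id while the scope is open; the finished span is
      // delivered to receivers when the scope closes.
      rootId = scope.getSpan().getSpanId();
    }
    System.out.println(rcvr.getSpans().size() + " span(s), root=" + rootId);
  }
}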
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
new file mode 100644 (file)
index 0000000..bba4ee5
--- /dev/null
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Used to create the graph formed by spans.
+ */
+public class TraceTree {
+
+  public static class SpansByParent {
+    private final Set<Span> set;
+
+    private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
+
+    SpansByParent(Collection<Span> spans) {
+      set = new LinkedHashSet<Span>();
+      parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
+      if (spans == null) {
+        return;
+      }
+      for (Span span : spans) {
+        set.add(span);
+        for (SpanId parent : span.getParents()) {
+          LinkedList<Span> list = parentToSpans.get(parent);
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(parent, list);
+          }
+          list.add(span);
+        }
+        if (span.getParents().length == 0) {
+          LinkedList<Span> list = parentToSpans.get(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE));
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE), list);
+          }
+          list.add(span);
+        }
+      }
+
+    }
+
+    public List<Span> find(SpanId parentId) {
+      LinkedList<Span> spans = parentToSpans.get(parentId);
+      if (spans == null) {
+        return new LinkedList<Span>();
+      }
+      return spans;
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSet(set).iterator();
+    }
+  }
+
+  public static class SpansByProcessId {
+    private final Set<Span> set;
+
+    SpansByProcessId(Collection<Span> spans) {
+      set = new LinkedHashSet<Span>();
+      if (spans == null) {
+        return;
+      }
+      for (Span span : spans) {
+        set.add(span);
+      }
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSet(set).iterator();
+    }
+  }
+
+  private final SpansByParent spansByParent;
+  private final SpansByProcessId spansByProcessId;
+
+  /**
+   * Create a new TraceTree
+   *
+   * @param spans The collection of spans to use to create this TraceTree. Should
+   *              have at least one root span.
+   */
+  public TraceTree(Collection<Span> spans) {
+    this.spansByParent = new SpansByParent(spans);
+    this.spansByProcessId = new SpansByProcessId(spans);
+  }
+
+  public SpansByParent getSpansByParent() {
+    return spansByParent;
+  }
+
+  public SpansByProcessId getSpansByProcessId() {
+    return spansByProcessId;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder bld = new StringBuilder();
+    String prefix = "";
+    for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
+      Span span = iter.next();
+      bld.append(prefix).append(span.toString());
+      prefix = "\n";
+    }
+    return bld.toString();
+  }
+}
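A sketch of how a test could consume this helper: build the parent-to-children index once, then look up the direct children of a known span. The countChildren helper is hypothetical; in TestHTraceHooks above the spans come from POJOSpanReceiver.getSpans().

import java.util.Collection;

import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;

final class TraceTreeExample {
  // Returns how many spans list parentId among their parents.
  static int countChildren(Collection<Span> spans, SpanId parentId) {
    TraceTree tree = new TraceTree(spans);
    return tree.getSpansByParent().find(parentId).size();
  }
}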
index 75aba03..9a5c19d 100644 (file)
@@ -58,16 +58,17 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import com.codahale.metrics.ConsoleReporter;
@@ -172,15 +173,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       Random rand = new Random(Thread.currentThread().getId());
       WAL wal = region.getWAL();
 
-      TraceScope threadScope =
-        Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
-      try {
+      try (TraceScope threadScope = TraceUtil.createTrace("WALPerfEval." + Thread.currentThread().getName())) {
         long startTime = System.currentTimeMillis();
         int lastSync = 0;
+        TraceUtil.addSampler(loopSampler);
         for (int i = 0; i < numIterations; ++i) {
-          assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
-          TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
-          try {
+          assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected.";
+          try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) {
             long now = System.nanoTime();
             Put put = setupPut(rand, key, value, numFamilies);
             WALEdit walEdit = new WALEdit();
@@ -196,16 +195,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
               }
             }
             latencyHistogram.update(System.nanoTime() - now);
-          } finally {
-            loopScope.close();
           }
         }
         long totalTime = (System.currentTimeMillis() - startTime);
         logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
       } catch (Exception e) {
         LOG.error(getClass().getSimpleName() + " Thread failed", e);
-      } finally {
-        threadScope.close();
       }
     }
   }
@@ -315,8 +310,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     LOG.info("FileSystem: " + fs);
 
     SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
-    final Sampler<?> sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
-    TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
+    final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
+    TraceUtil.addSampler(sampler);
+    TraceScope scope = TraceUtil.createTrace("WALPerfEval");
 
     try {
       if (rootRegionDir == null) {
@@ -338,8 +334,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
           // a table per desired region means we can avoid carving up the key space
           final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
           regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
-          benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
-              syncInterval, traceFreq));
+          benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
+              syncInterval, traceFreq), "");
         }
         ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
           outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
@@ -389,9 +385,15 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       }
     } finally {
       // We may be called inside a test that wants to keep on using the fs.
-      if (!noclosefs) fs.close();
-      scope.close();
-      if (receiverHost != null) receiverHost.closeReceivers();
+      if (!noclosefs) {
+        fs.close();
+      }
+      if (scope != null) {
+        scope.close();
+      }
+      if (receiverHost != null) {
+        receiverHost.closeReceivers();
+      }
     }
 
     return(0);
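TraceUtil.wrap above defers to HTrace 4's Tracer.wrap, which snapshots the tracing context active at wrap time and re-establishes it around run(), tying work done on the benchmark threads back to the parent trace. A standalone illustration, names hypothetical:

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class WrapDemo {
  public static void main(String[] args) throws InterruptedException {
    Tracer tracer = new Tracer.Builder("wrap-demo")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();
    try (TraceScope scope = tracer.newScope("parent")) {
      // The Runnable returned by wrap() reopens a scope linked to "parent"
      // before delegating to the wrapped task.
      Runnable task = tracer.wrap(
          () -> System.out.println("traced work"), "child");
      Thread t = new Thread(task);
      t.start();
      t.join();
    }
  }
}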
index 86f1426..3c7cb3f 100644 (file)
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-hdfs</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
index 5ecd28c..b0350be 100644 (file)
@@ -16,8 +16,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-HTrace = org.apache.htrace.Trace
-java_import org.apache.htrace.Sampler
+HTrace = org.apache.htrace.core.Tracer
+java_import org.apache.htrace.core.Sampler
 java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
 
 module Shell
index ba54e99..db2c03d 100644 (file)
       <version>${hadoop-two.version}</version>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
         </exclusion>
       <scope>test</scope>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
         </exclusion>
       <scope>test</scope>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
         </exclusion>
index f55ee95..b488d94 100644 (file)
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-common</artifactId>
                     <scope>compile</scope>
+                    <exclusions>
+                        <exclusion>
+                            <groupId>org.apache.htrace</groupId>
+                            <artifactId>htrace-core</artifactId>
+                        </exclusion>
+                    </exclusions>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <scope>compile</scope>
                     <exclusions>
                       <exclusion>
+                        <groupId>org.apache.htrace</groupId>
+                        <artifactId>htrace-core</artifactId>
+                      </exclusion>
+                      <exclusion>
                         <groupId>com.google.guava</groupId>
                         <artifactId>guava</artifactId>
                       </exclusion>
                     <scope>compile</scope>
                     <exclusions>
                       <exclusion>
+                        <groupId>org.apache.htrace</groupId>
+                        <artifactId>htrace-core</artifactId>
+                      </exclusion>
+                      <exclusion>
                         <groupId>com.google.guava</groupId>
                         <artifactId>guava</artifactId>
                       </exclusion>
                     <scope>compile</scope>
                     <exclusions>
                       <exclusion>
+                        <groupId>org.apache.htrace</groupId>
+                        <artifactId>htrace-core</artifactId>
+                      </exclusion>
+                      <exclusion>
                         <groupId>com.google.guava</groupId>
                         <artifactId>guava</artifactId>
                       </exclusion>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-common</artifactId>
                     <scope>compile</scope>
+                    <exclusions>
+                        <exclusion>
+                            <groupId>org.apache.htrace</groupId>
+                            <artifactId>htrace-core</artifactId>
+                        </exclusion>
+                    </exclusions>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
                     <artifactId>hadoop-minicluster</artifactId>
                     <scope>compile</scope>
+                    <exclusions>
+                        <exclusion>
+                            <groupId>org.apache.htrace</groupId>
+                            <artifactId>htrace-core</artifactId>
+                        </exclusion>
+                    </exclusions>
                 </dependency>
                 <dependency>
                     <groupId>org.apache.hadoop</groupId>
index 0dc0dde..6457b33 100644 (file)
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
       </dependencies>
       <build>
diff --git a/pom.xml b/pom.xml
index 132dd62..1321a74 100755 (executable)
--- a/pom.xml
+++ b/pom.xml
     <jruby.version>9.1.10.0</jruby.version>
     <junit.version>4.12</junit.version>
     <hamcrest.version>1.3</hamcrest.version>
-    <htrace.version>3.2.0-incubating</htrace.version>
+    <htrace.version>4.2.0-incubating</htrace.version>
+    <htrace-hadoop.version>3.2.0-incubating</htrace-hadoop.version>
     <log4j.version>1.2.17</log4j.version>
     <mockito-core.version>2.1.0</mockito-core.version>
     <!--Internally we use a different version of protobuf. See hbase-protocol-shaded-->
       </dependency>
       <dependency>
         <groupId>org.apache.htrace</groupId>
-        <artifactId>htrace-core</artifactId>
+        <artifactId>htrace-core4</artifactId>
         <version>${htrace.version}</version>
       </dependency>
       <dependency>
             <artifactId>hadoop-hdfs</artifactId>
             <exclusions>
               <exclusion>
+                <groupId>org.apache.htrace</groupId>
+                <artifactId>htrace-core</artifactId>
+              </exclusion>
+              <exclusion>
                 <groupId>javax.servlet.jsp</groupId>
                 <artifactId>jsp-api</artifactId>
               </exclusion>
             <scope>test</scope>
             <exclusions>
               <exclusion>
+                <groupId>org.apache.htrace</groupId>
+                <artifactId>htrace-core</artifactId>
+              </exclusion>
+              <exclusion>
                 <groupId>javax.servlet.jsp</groupId>
                 <artifactId>jsp-api</artifactId>
               </exclusion>
             <version>${hadoop-two.version}</version>
             <exclusions>
               <exclusion>
+                <groupId>org.apache.htrace</groupId>
+                <artifactId>htrace-core</artifactId>
+              </exclusion>
+              <exclusion>
                 <groupId>commons-beanutils</groupId>
                 <artifactId>commons-beanutils</artifactId>
               </exclusion>
             <artifactId>hadoop-minicluster</artifactId>
             <version>${hadoop-two.version}</version>
             <exclusions>
-            <exclusion>
-              <groupId>commons-httpclient</groupId>
-              <artifactId>commons-httpclient</artifactId>
-            </exclusion>
+              <exclusion>
+                <groupId>org.apache.htrace</groupId>
+                <artifactId>htrace-core</artifactId>
+              </exclusion>
+              <exclusion>
+                <groupId>commons-httpclient</groupId>
+                <artifactId>commons-httpclient</artifactId>
+              </exclusion>
               <exclusion>
                 <groupId>javax.servlet.jsp</groupId>
                 <artifactId>jsp-api</artifactId>
            <artifactId>hadoop-hdfs</artifactId>
            <exclusions>
              <exclusion>
+               <groupId>org.apache.htrace</groupId>
+               <artifactId>htrace-core</artifactId>
+             </exclusion>
+             <exclusion>
                <groupId>javax.servlet.jsp</groupId>
                <artifactId>jsp-api</artifactId>
              </exclusion>
            <scope>test</scope>
            <exclusions>
              <exclusion>
+               <groupId>org.apache.htrace</groupId>
+               <artifactId>htrace-core</artifactId>
+             </exclusion>
+             <exclusion>
                <groupId>javax.servlet.jsp</groupId>
                <artifactId>jsp-api</artifactId>
              </exclusion>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop-three.version}</version>
            <exclusions>
-            <exclusion>
-              <groupId>commons-beanutils</groupId>
-              <artifactId>commons-beanutils</artifactId>
-            </exclusion>
+             <exclusion>
+               <groupId>org.apache.htrace</groupId>
+               <artifactId>htrace-core</artifactId>
+             </exclusion>
+             <exclusion>
+               <groupId>commons-beanutils</groupId>
+               <artifactId>commons-beanutils</artifactId>
+             </exclusion>
              <exclusion>
                <groupId>javax.servlet.jsp</groupId>
                <artifactId>jsp-api</artifactId>
            <artifactId>hadoop-minicluster</artifactId>
            <version>${hadoop-three.version}</version>
            <exclusions>
-            <exclusion>
-              <groupId>commons-httpclient</groupId>
-              <artifactId>commons-httpclient</artifactId>
-            </exclusion>
+             <exclusion>
+               <groupId>org.apache.htrace</groupId>
+               <artifactId>htrace-core</artifactId>
+             </exclusion>
+             <exclusion>
+               <groupId>commons-httpclient</groupId>
+               <artifactId>commons-httpclient</artifactId>
+             </exclusion>
              <exclusion>
                <groupId>javax.servlet.jsp</groupId>
                <artifactId>jsp-api</artifactId>
index 9db4b7f..8bd1962 100644 (file)
@@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_      for a `hbase.local-fi
 
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
+  <value>org.apache.htrace.core.LocalFileSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.local-file-span-receiver.path</name>
@@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7
 
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.ZipkinSpanReceiver</value>
+  <value>org.apache.htrace.core.ZipkinSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.zipkin.collector-hostname</name>