Skip to content

Commit 491603c

Browse files
committed
HDDS-12060. Replace System.currentTimeMillis() with Time.monotonicNow() for duration calculation
1 parent 03c80f8 commit 491603c

File tree

37 files changed

+107
-72
lines changed

37 files changed

+107
-72
lines changed

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@
6060
import org.apache.hadoop.ozone.ClientVersion;
6161
import org.apache.hadoop.ozone.OzoneConfigKeys;
6262
import org.apache.hadoop.ozone.OzoneConsts;
63+
import org.apache.hadoop.util.Time;
6364
import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
6465
import org.apache.ratis.thirdparty.io.grpc.Status;
6566
import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts;
@@ -571,7 +572,7 @@ public XceiverClientReply sendCommandAsync(
571572
final CompletableFuture<ContainerCommandResponseProto> replyFuture =
572573
new CompletableFuture<>();
573574
semaphore.acquire();
574-
long requestTime = System.currentTimeMillis();
575+
long requestTime = Time.monotonicNow();
575576
metrics.incrPendingContainerOpsMetrics(request.getCmdType());
576577

577578
// create a new grpc message stream pair for each call.
@@ -601,7 +602,7 @@ public void onCompleted() {
601602

602603
private void decreasePendingMetricsAndReleaseSemaphore() {
603604
metrics.decrPendingContainerOpsMetrics(request.getCmdType());
604-
long cost = System.currentTimeMillis() - requestTime;
605+
long cost = Time.monotonicNow() - requestTime;
605606
metrics.addContainerOpsLatency(request.getCmdType(), cost);
606607
if (LOG.isDebugEnabled()) {
607608
LOG.debug("Executed command {} on datanode {}, cost = {}, cmdType = {}",

hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@
4747
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
4848
import org.apache.hadoop.hdds.security.SecurityConfig;
4949
import org.apache.hadoop.hdds.tracing.TracingUtil;
50+
import org.apache.hadoop.util.Time;
5051
import org.apache.ratis.client.RaftClient;
5152
import org.apache.ratis.client.api.DataStreamApi;
5253
import org.apache.ratis.grpc.GrpcTlsConfig;
@@ -363,7 +364,7 @@ private XceiverClientReply handleFailedAllCommit(long index, Collection<CommitIn
363364
public XceiverClientReply sendCommandAsync(
364365
ContainerCommandRequestProto request) {
365366
XceiverClientReply asyncReply = new XceiverClientReply(null);
366-
long requestTime = System.currentTimeMillis();
367+
long requestTime = Time.monotonicNow();
367368
CompletableFuture<RaftClientReply> raftClientReply =
368369
sendRequestAsync(request);
369370
metrics.incrPendingContainerOpsMetrics(request.getCmdType());
@@ -376,7 +377,7 @@ public XceiverClientReply sendCommandAsync(
376377
}
377378
metrics.decrPendingContainerOpsMetrics(request.getCmdType());
378379
metrics.addContainerOpsLatency(request.getCmdType(),
379-
System.currentTimeMillis() - requestTime);
380+
Time.monotonicNow() - requestTime);
380381
}).thenApply(reply -> {
381382
try {
382383
if (!reply.isSuccess()) {

hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
import org.apache.hadoop.hdds.annotation.InterfaceStability;
4040
import org.apache.hadoop.hdds.conf.ConfigurationSource;
4141
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
42+
import org.apache.hadoop.util.Time;
4243
import org.slf4j.Logger;
4344
import org.slf4j.LoggerFactory;
4445

@@ -95,9 +96,9 @@ public void run() {
9596
LOG.info("Shutdown process invoked a second time: ignoring");
9697
return;
9798
}
98-
long started = System.currentTimeMillis();
99+
long started = Time.monotonicNow();
99100
int timeoutCount = MGR.executeShutdown();
100-
long ended = System.currentTimeMillis();
101+
long ended = Time.monotonicNow();
101102
LOG.debug(String.format(
102103
"Completed shutdown in %.3f seconds; Timeouts: %d",
103104
(ended - started) / 1000.0, timeoutCount));

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
3434
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
3535
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
36+
import org.apache.hadoop.util.Time;
3637
import org.slf4j.Logger;
3738

3839
/**
@@ -89,7 +90,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
8990
List<CompletableFuture<Void>> futures = new ArrayList<>();
9091
List<HddsVolume> hddsVolumes = StorageVolumeUtil.getHddsVolumesList(
9192
hddsVolumeSet.getVolumesList());
92-
long start = System.currentTimeMillis();
93+
long start = Time.monotonicNow();
9394
for (HddsVolume volume : hddsVolumes) {
9495
futures.add(CompletableFuture.runAsync(
9596
() -> loadVolume(volume, readOnly, logger)));
@@ -99,7 +100,7 @@ public static void loadAllHddsVolumeDbStore(MutableVolumeSet hddsVolumeSet,
99100
}
100101
if (logger != null) {
101102
logger.info("Load {} volumes DbStore cost: {}ms", hddsVolumes.size(),
102-
System.currentTimeMillis() - start);
103+
Time.monotonicNow() - start);
103104
}
104105
}
105106

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,7 @@
9494
import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
9595
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures.SchemaV3;
9696
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
97+
import org.apache.hadoop.util.Time;
9798
import org.apache.hadoop.util.Timer;
9899
import org.apache.ratis.grpc.GrpcTlsConfig;
99100
import org.slf4j.Logger;
@@ -314,7 +315,7 @@ public void buildContainerSet() throws IOException {
314315
Iterator<StorageVolume> volumeSetIterator = volumeSet.getVolumesList()
315316
.iterator();
316317
ArrayList<Thread> volumeThreads = new ArrayList<>();
317-
long startTime = System.currentTimeMillis();
318+
long startTime = Time.monotonicNow();
318319

319320
// Load container inspectors that may be triggered at startup based on
320321
// system properties set. These can inspect and possibly repair
@@ -356,7 +357,7 @@ public void buildContainerSet() throws IOException {
356357
ContainerInspectorUtil.unload();
357358

358359
LOG.info("Build ContainerSet costs {}s",
359-
(System.currentTimeMillis() - startTime) / 1000);
360+
(Time.monotonicNow() - startTime) / 1000);
360361
}
361362

362363
/**

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@
7070
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
7171
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
7272
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
73+
import org.apache.hadoop.util.Time;
7374
import org.apache.ozone.test.GenericTestUtils;
7475
import org.junit.jupiter.api.BeforeEach;
7576
import org.junit.jupiter.api.Test;
@@ -465,7 +466,7 @@ public void testDeleteContainerTimeout() throws IOException {
465466
final ContainerSet containerSet = newContainerSet();
466467
final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
467468
final Clock clock = mock(Clock.class);
468-
long startTime = System.currentTimeMillis();
469+
long startTime = Time.monotonicNow();
469470

470471
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
471472
when(clock.millis())

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@
6868
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
6969
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
7070
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
71+
import org.apache.hadoop.util.Time;
7172
import org.apache.ozone.test.GenericTestUtils;
7273
import org.apache.ratis.util.FileUtils;
7374
import org.junit.jupiter.api.AfterEach;
@@ -446,15 +447,15 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo)
446447
(HddsVolume) volumes.get(i), containerSet, conf, true);
447448
threads[i] = new Thread(containerReaders[i]);
448449
}
449-
long startTime = System.currentTimeMillis();
450+
long startTime = Time.monotonicNow();
450451
for (int i = 0; i < volumeNum; i++) {
451452
threads[i].start();
452453
}
453454
for (int i = 0; i < volumeNum; i++) {
454455
threads[i].join();
455456
}
456457
System.out.println("Open " + volumeNum + " Volume with " + containerCount +
457-
" costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
458+
" costs " + (Time.monotonicNow() - startTime) / 1000 + "s");
458459
assertEquals(containerCount,
459460
containerSet.getContainerMap().entrySet().size());
460461
assertEquals(volumeSet.getFailedVolumesList().size(), 0);

hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
import java.util.UUID;
2929
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
3030
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
31+
import org.apache.hadoop.util.Time;
3132
import org.junit.jupiter.api.Test;
3233

3334
/**
@@ -104,7 +105,7 @@ public void test() throws InterruptedException {
104105

105106
ReplicationSupervisor rs = ReplicationSupervisor.newBuilder().build();
106107

107-
final long start = System.currentTimeMillis();
108+
final long start = Time.monotonicNow();
108109

109110
//schedule 100 container replication
110111
for (int i = 0; i < 100; i++) {
@@ -114,7 +115,7 @@ public void test() throws InterruptedException {
114115
rs.addTask(new ReplicationTask(fromSources(i, sources), replicator));
115116
}
116117
rs.shutdownAfterFinish();
117-
final long executionTime = System.currentTimeMillis() - start;
118+
final long executionTime = Time.monotonicNow() - start;
118119
System.out.println(executionTime);
119120
assertThat(executionTime)
120121
.withFailMessage("Execution was too slow : " + executionTime + " ms")

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
import org.apache.hadoop.ozone.lease.LeaseExpiredException;
3434
import org.apache.hadoop.ozone.lease.LeaseManager;
3535
import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
36+
import org.apache.hadoop.util.Time;
3637
import org.slf4j.Logger;
3738
import org.slf4j.LoggerFactory;
3839

@@ -116,7 +117,7 @@ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
116117
EventPublisher publisher) {
117118
metrics.incrementTrackedEvents();
118119
long identifier = payload.getId();
119-
startTrackingTimes.put(identifier, System.currentTimeMillis());
120+
startTrackingTimes.put(identifier, Time.monotonicNow());
120121

121122
trackedEventsByID.put(identifier, payload);
122123
trackedEvents.add(payload);
@@ -139,7 +140,7 @@ protected synchronized void handleCompletion(COMPLETION_PAYLOAD
139140
if (trackedEvents.remove(payload)) {
140141
metrics.incrementCompletedEvents();
141142
long originalTime = startTrackingTimes.remove(id);
142-
metrics.updateFinishingTime(System.currentTimeMillis() - originalTime);
143+
metrics.updateFinishingTime(Time.monotonicNow() - originalTime);
143144
onFinished(publisher, payload);
144145
}
145146
}

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/ProtocolMessageMetrics.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
import org.apache.hadoop.metrics2.MetricsTag;
2929
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
3030
import org.apache.hadoop.metrics2.lib.Interns;
31+
import org.apache.hadoop.util.Time;
3132
import org.apache.ratis.util.UncheckedAutoCloseable;
3233

3334
/**
@@ -68,12 +69,12 @@ public void increment(KEY key, long duration) {
6869
}
6970

7071
public UncheckedAutoCloseable measure(KEY key) {
71-
final long startTime = System.currentTimeMillis();
72+
final long startTime = Time.monotonicNow();
7273
concurrency.incrementAndGet();
7374
return () -> {
7475
concurrency.decrementAndGet();
7576
counters.get(key).incrementAndGet();
76-
elapsedTimes.get(key).addAndGet(System.currentTimeMillis() - startTime);
77+
elapsedTimes.get(key).addAndGet(Time.monotonicNow() - startTime);
7778
};
7879
}
7980

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
2828
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
2929
import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
30+
import org.apache.hadoop.util.Time;
3031
import org.slf4j.Logger;
3132
import org.slf4j.LoggerFactory;
3233

@@ -297,7 +298,7 @@ private List<KeyValue<byte[], byte[]>> getRangeKVs(byte[] startKey,
297298
int count, boolean sequential, byte[] prefix,
298299
MetadataKeyFilters.MetadataKeyFilter... filters)
299300
throws IOException, IllegalArgumentException {
300-
long start = System.currentTimeMillis();
301+
long start = Time.monotonicNow();
301302

302303
if (count < 0) {
303304
throw new IllegalArgumentException(
@@ -342,7 +343,7 @@ && get(startKey) == null) {
342343
}
343344
}
344345
} finally {
345-
long end = System.currentTimeMillis();
346+
long end = Time.monotonicNow();
346347
long timeConsumed = end - start;
347348
if (LOG.isDebugEnabled()) {
348349
if (filters != null) {

hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@
5050
import org.apache.hadoop.ozone.upgrade.UpgradeException.ResultCodes;
5151
import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.Status;
5252
import org.apache.hadoop.ozone.upgrade.UpgradeFinalization.StatusAndMessages;
53+
import org.apache.hadoop.util.Time;
5354
import org.apache.ratis.protocol.exceptions.NotLeaderException;
5455

5556
/**
@@ -169,9 +170,9 @@ public void finalizeAndWaitForCompletion(
169170
}
170171

171172
boolean success = false;
172-
long endTime = System.currentTimeMillis() +
173+
long endTime = Time.monotonicNow() +
173174
TimeUnit.SECONDS.toMillis(maxTimeToWaitInSeconds);
174-
while (System.currentTimeMillis() < endTime) {
175+
while (Time.monotonicNow() < endTime) {
175176
try {
176177
response = reportStatus(upgradeClientID, false);
177178
LOG.info("Finalization Messages : {} ", response.msgs());

hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@
8989
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
9090
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
9191
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
92+
import org.apache.hadoop.util.Time;
9293
import org.apache.ozone.compaction.log.CompactionFileInfo;
9394
import org.apache.ozone.compaction.log.CompactionLogEntry;
9495
import org.apache.ozone.rocksdb.util.RdbUtil;
@@ -758,7 +759,7 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ)
758759
private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
759760

760761
LOG.trace("Current time: " + System.currentTimeMillis());
761-
long t1 = System.currentTimeMillis();
762+
long t1 = Time.monotonicNow();
762763

763764
final long snapshotGeneration = rocksDB.get().getLatestSequenceNumber();
764765
final String cpPath = CP_PATH_PREFIX + snapshotGeneration;
@@ -780,7 +781,7 @@ private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException {
780781
colHandle));
781782
this.snapshots.add(currentSnapshot);
782783

783-
long t2 = System.currentTimeMillis();
784+
long t2 = Time.monotonicNow();
784785
LOG.trace("Current time: " + t2);
785786
LOG.debug("Time elapsed: " + (t2 - t1) + " ms");
786787
}

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@
5151
import org.apache.hadoop.hdds.scm.ha.SCMContext;
5252
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
5353
import org.apache.hadoop.hdds.server.events.EventPublisher;
54+
import org.apache.hadoop.util.Time;
5455
import org.slf4j.Logger;
5556
import org.slf4j.LoggerFactory;
5657

@@ -527,7 +528,7 @@ public synchronized void startMaintenance(DatanodeDetails dn, int endInHours)
527528
long maintenanceEnd = 0;
528529
if (endInHours != 0) {
529530
maintenanceEnd =
530-
(System.currentTimeMillis() / 1000L) + (endInHours * 60L * 60L);
531+
(Time.monotonicNow() / 1000L) + (endInHours * 60L * 60L);
531532
}
532533
if (opState == NodeOperationalState.IN_SERVICE) {
533534
nodeManager.setNodeOperationalState(

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import java.util.Objects;
2323
import java.util.Set;
2424
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
25+
import org.apache.hadoop.util.Time;
2526

2627
/**
2728
* This class is used to capture the current status of a datanode. This
@@ -123,7 +124,7 @@ public boolean operationalStateExpired() {
123124
if (0 == opStateExpiryEpochSeconds) {
124125
return false;
125126
}
126-
return System.currentTimeMillis() / 1000 >= opStateExpiryEpochSeconds;
127+
return Time.monotonicNow() / 1000 >= opStateExpiryEpochSeconds;
127128
}
128129

129130
public boolean isInService() {

hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java

+2-1
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@
5656
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
5757
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
5858
import org.apache.hadoop.hdds.server.events.EventQueue;
59+
import org.apache.hadoop.util.Time;
5960
import org.junit.jupiter.api.BeforeEach;
6061
import org.junit.jupiter.api.Test;
6162
import org.junit.jupiter.api.io.TempDir;
@@ -380,7 +381,7 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception {
380381
List<DatanodeDetails> dns = generateDatanodes();
381382

382383
long maintenanceEnd =
383-
(System.currentTimeMillis() / 1000L) + (100 * 60L * 60L);
384+
(Time.monotonicNow() / 1000L) + (100 * 60L * 60L);
384385

385386
// Put 1 node into entering_maintenance, 1 node into decommissioning
386387
// and 1 node into in_maintenance.

hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -637,7 +637,7 @@ public void testSetNodeOpStateAndCommandFired()
637637
DatanodeDetails dn = HddsTestUtils.createRandomDatanodeAndRegister(
638638
nodeManager);
639639

640-
long expiry = System.currentTimeMillis() / 1000 + 1000;
640+
long expiry = Time.monotonicNow() / 1000 + 1000;
641641
nodeManager.setNodeOperationalState(dn,
642642
HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, expiry);
643643

hadoop-hdds/test-utils/src/test/java/org/apache/ozone/test/LambdaTestUtils.java

+3-2
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
import com.google.common.base.Preconditions;
2121
import java.util.concurrent.Callable;
2222
import java.util.concurrent.TimeoutException;
23+
import org.apache.hadoop.util.Time;
2324
import org.slf4j.Logger;
2425
import org.slf4j.LoggerFactory;
2526

@@ -105,7 +106,7 @@ public static int await(int timeoutMillis,
105106
"timeoutMillis must be >= 0");
106107
Preconditions.checkNotNull(timeoutHandler);
107108

108-
final long endTime = System.currentTimeMillis() + timeoutMillis;
109+
final long endTime = Time.monotonicNow() + timeoutMillis;
109110
Throwable ex = null;
110111
boolean running = true;
111112
int iterations = 0;
@@ -126,7 +127,7 @@ public static int await(int timeoutMillis,
126127
LOG.debug("await() iteration {}", iterations, e);
127128
ex = e;
128129
}
129-
running = System.currentTimeMillis() < endTime;
130+
running = Time.monotonicNow() < endTime;
130131
if (running) {
131132
int sleeptime = retry.call();
132133
if (sleeptime >= 0) {

0 commit comments

Comments (0)