Skip to content

Commit 0bd85bb

Browse files
authored
Wrap expensive log trace calls with if enabled condition (#3185)
While these log strings and logging operations are not executed when the log level is below trace, if a parameter passed to the log statement is expensive to fetch, that price is still paid every time (depending on the JIT). To make our expensive logging more performant, we wrap trace logs in a conditional check whenever the parameters passed are classified as potentially expensive.
1 parent 879e82d commit 0bd85bb

File tree

4 files changed

+91
-49
lines changed

4 files changed

+91
-49
lines changed

ambry-network/src/main/java/com/github/ambry/network/SSLTransmission.java

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -182,9 +182,11 @@ private int readFromSocketChannel() throws IOException {
182182
private boolean flush(ByteBuffer buf) throws IOException {
183183
int remaining = buf.remaining();
184184
if (remaining > 0) {
185-
long startNs = SystemTime.getInstance().nanoseconds();
185+
long startNs = logger.isTraceEnabled() ? SystemTime.getInstance().nanoseconds() : 0;
186186
int written = socketChannel.write(buf);
187-
logger.trace("Flushed {} bytes in {} ns", written, SystemTime.getInstance().nanoseconds() - startNs);
187+
if (logger.isTraceEnabled()) {
188+
logger.trace("Flushed {} bytes in {} ns", written, SystemTime.getInstance().nanoseconds() - startNs);
189+
}
188190
return written >= remaining;
189191
}
190192
return true;
@@ -412,8 +414,10 @@ public boolean read() throws IOException {
412414
long startTimeMs = time.milliseconds();
413415
long bytesRead = networkReceive.getReceivedBytes().readFrom(this);
414416
long readTimeMs = time.milliseconds() - startTimeMs;
415-
logger.trace("Bytes read {} from {} using key {} Time: {} Ms.", bytesRead,
416-
socketChannel.socket().getRemoteSocketAddress(), getConnectionId(), readTimeMs);
417+
if (logger.isTraceEnabled()) {
418+
logger.trace("Bytes read {} from {} using key {} Time: {} Ms.", bytesRead,
419+
socketChannel.socket().getRemoteSocketAddress(), getConnectionId(), readTimeMs);
420+
}
417421
if (bytesRead > 0) {
418422
metrics.transmissionReceiveTime.update(readTimeMs);
419423
metrics.transmissionReceiveSize.update(bytesRead);
@@ -529,8 +533,10 @@ public boolean write() throws IOException {
529533
long startTimeMs = time.milliseconds();
530534
long bytesWritten = send.writeTo(this);
531535
long writeTimeMs = time.milliseconds() - startTimeMs;
532-
logger.trace("Bytes written {} to {} using key {} Time: {} ms", bytesWritten,
533-
socketChannel.socket().getRemoteSocketAddress(), getConnectionId(), writeTimeMs);
536+
if (logger.isTraceEnabled()) {
537+
logger.trace("Bytes written {} to {} using key {} Time: {} ms", bytesWritten,
538+
socketChannel.socket().getRemoteSocketAddress(), getConnectionId(), writeTimeMs);
539+
}
534540
if (bytesWritten > 0) {
535541
metrics.transmissionSendTime.update(writeTimeMs);
536542
metrics.transmissionSendSize.update(bytesWritten);

ambry-replication/src/main/java/com/github/ambry/replication/ReplicaThread.java

Lines changed: 38 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -302,19 +302,23 @@ protected void setTerminateCurrentContinuousReplicationCycle(boolean value) {
302302
*/
303303
protected void logReplicationStatus(RemoteReplicaInfo remoteReplicaInfo,
304304
ExchangeMetadataResponse exchangeMetadataResponse) {
305-
logger.trace("ReplicationStatus | {} | {} | isSealed = {} | Token = {} | localLagFromRemoteInBytes = {}",
306-
remoteReplicaInfo, remoteReplicaInfo.getReplicaId().getReplicaType(),
307-
remoteReplicaInfo.getReplicaId().isSealed(), remoteReplicaInfo.getToken().toString(),
308-
exchangeMetadataResponse.localLagFromRemoteInBytes);
305+
if (logger.isTraceEnabled()) {
306+
logger.trace("ReplicationStatus | {} | {} | isSealed = {} | Token = {} | localLagFromRemoteInBytes = {}",
307+
remoteReplicaInfo, remoteReplicaInfo.getReplicaId().getReplicaType(),
308+
remoteReplicaInfo.getReplicaId().isSealed(), remoteReplicaInfo.getToken().toString(),
309+
exchangeMetadataResponse.localLagFromRemoteInBytes);
310+
}
309311
}
310312

311313
@Override
312314
public void run() {
313315
try {
314-
logger.trace("Starting replica thread on Local node: {} Thread name: {}", dataNodeId, threadName);
315-
for (Map.Entry<DataNodeId, Set<RemoteReplicaInfo>> replicasToReplicateEntry : replicasToReplicateGroupedByNode.entrySet()) {
316-
logger.trace("Remote node: {} Thread name: {} ReplicasToReplicate: {}", replicasToReplicateEntry.getKey(),
317-
threadName, replicasToReplicateEntry.getValue());
316+
if (logger.isTraceEnabled()) {
317+
logger.trace("Starting replica thread on Local node: {} Thread name: {}", dataNodeId, threadName);
318+
for (Map.Entry<DataNodeId, Set<RemoteReplicaInfo>> replicasToReplicateEntry : replicasToReplicateGroupedByNode.entrySet()) {
319+
logger.trace("Remote node: {} Thread name: {} ReplicasToReplicate: {}", replicasToReplicateEntry.getKey(),
320+
threadName, replicasToReplicateEntry.getValue());
321+
}
318322
}
319323
logger.info("Begin iteration for thread {}", threadName);
320324
while (running) {
@@ -394,8 +398,10 @@ public void addRemoteReplicaInfo(RemoteReplicaInfo remoteReplicaInfo) {
394398
terminateCurrentContinuousReplicationCycle = true;
395399
lock.unlock();
396400
}
397-
logger.trace("RemoteReplicaInfo {} is added to ReplicaThread {}. Now working on {} dataNodeIds.", remoteReplicaInfo,
398-
threadName, replicasToReplicateGroupedByNode.keySet().size());
401+
if (logger.isTraceEnabled()) {
402+
logger.trace("RemoteReplicaInfo {} is added to ReplicaThread {}. Now working on {} dataNodeIds.", remoteReplicaInfo,
403+
threadName, replicasToReplicateGroupedByNode.keySet().size());
404+
}
399405
}
400406

401407
/**
@@ -1226,8 +1232,10 @@ ReplicaMetadataResponse getReplicaMetadataResponse(List<RemoteReplicaInfo> repli
12261232
try {
12271233
ReplicaMetadataRequest request = createReplicaMetadataRequest(replicasToReplicatePerNode, remoteNode);
12281234
channelOutput = connectedChannel.sendAndReceive(request);
1229-
logger.trace("Remote node: {} Thread name: {} Remote replicas: {} Stream size after deserialization: {} ",
1230-
remoteNode, threadName, replicasToReplicatePerNode, channelOutput.getInputStream().available());
1235+
if (logger.isTraceEnabled()) {
1236+
logger.trace("Remote node: {} Thread name: {} Remote replicas: {} Stream size after deserialization: {} ",
1237+
remoteNode, threadName, replicasToReplicatePerNode, channelOutput.getInputStream().available());
1238+
}
12311239
ReplicaMetadataResponse response =
12321240
ReplicaMetadataResponse.readFrom(channelOutput.getInputStream(), findTokenHelper, clusterMap);
12331241

@@ -1495,7 +1503,7 @@ GetRequest createGetRequest(List<RemoteReplicaInfo> replicasToReplicatePerNode,
14951503
if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.NoError) {
14961504
Set<StoreKey> missingStoreKeys = exchangeMetadataResponse.getMissingStoreKeys();
14971505
if (missingStoreKeys.size() > 0) {
1498-
if (remoteNode instanceof CloudDataNode) {
1506+
if (remoteNode instanceof CloudDataNode && logger.isTraceEnabled()) {
14991507
logger.trace("Replicating blobs from CloudDataNode: {}", missingStoreKeys);
15001508
}
15011509
ArrayList<BlobId> keysToFetch = new ArrayList<>();
@@ -1601,10 +1609,12 @@ private void handleGetResponse(GetResponse getResponse, List<RemoteReplicaInfo>
16011609
if (partitionResponseInfo.getErrorCode() == ServerErrorCode.NoError) {
16021610
List<MessageInfo> messageInfoList = partitionResponseInfo.getMessageInfoList();
16031611
try {
1604-
logger.trace("Remote node: {} Thread name: {} Remote replica: {} Messages to fix: {} "
1605-
+ "Partition: {} Local mount path: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(),
1606-
exchangeMetadataResponse.getMissingStoreKeys(), remoteReplicaInfo.getReplicaId().getPartitionId(),
1607-
remoteReplicaInfo.getLocalReplicaId().getMountPath());
1612+
if (logger.isTraceEnabled()) {
1613+
logger.trace("Remote node: {} Thread name: {} Remote replica: {} Messages to fix: {} "
1614+
+ "Partition: {} Local mount path: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(),
1615+
exchangeMetadataResponse.getMissingStoreKeys(), remoteReplicaInfo.getReplicaId().getPartitionId(),
1616+
remoteReplicaInfo.getLocalReplicaId().getMountPath());
1617+
}
16081618

16091619
List<Transformer> transformers = transformer == null ? Collections.emptyList()
16101620
: Collections.singletonList(transformer);
@@ -1628,10 +1638,12 @@ private void handleGetResponse(GetResponse getResponse, List<RemoteReplicaInfo>
16281638
totalBytesFixed += messageInfo.getSize();
16291639

16301640
replicationMetrics.updateReplicationFetchBytes(remoteReplicaInfo, messageInfo.getSize());
1631-
logger.trace("Remote node: {} Thread name: {} Remote replica: {} Message replicated: {} Partition: {} "
1632-
+ "Local mount path: {} Message size: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(),
1633-
messageInfo.getStoreKey(), remoteReplicaInfo.getReplicaId().getPartitionId(),
1634-
remoteReplicaInfo.getLocalReplicaId().getMountPath(), messageInfo.getSize());
1641+
if (logger.isTraceEnabled()) {
1642+
logger.trace("Remote node: {} Thread name: {} Remote replica: {} Message replicated: {} Partition: {} "
1643+
+ "Local mount path: {} Message size: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(),
1644+
messageInfo.getStoreKey(), remoteReplicaInfo.getReplicaId().getPartitionId(),
1645+
remoteReplicaInfo.getLocalReplicaId().getMountPath(), messageInfo.getSize());
1646+
}
16351647
if (notification != null) {
16361648
notification.onBlobReplicaCreated(dataNodeId.getHostname(), dataNodeId.getPort(),
16371649
messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
@@ -1732,9 +1744,11 @@ protected void advanceToken(RemoteReplicaInfo remoteReplicaInfo, ExchangeMetadat
17321744
remoteReplicaInfo.setLocalLagFromRemoteInBytes(exchangeMetadataResponse.localLagFromRemoteInBytes);
17331745
// reset stored metadata response for this replica
17341746
remoteReplicaInfo.setExchangeMetadataResponse(new ExchangeMetadataResponse(ServerErrorCode.NoError));
1735-
logger.trace("Thread name: {}, updating token {} and lag {} for partition {} for Remote replica {}", threadName,
1736-
remoteReplicaInfo.getToken(), remoteReplicaInfo.getLocalLagFromRemoteInBytes(),
1737-
remoteReplicaInfo.getReplicaId().getPartitionId().toString(), remoteReplicaInfo.getReplicaId());
1747+
if (logger.isTraceEnabled()) {
1748+
logger.trace("Thread name: {}, updating token {} and lag {} for partition {} for Remote replica {}", threadName,
1749+
remoteReplicaInfo.getToken(), remoteReplicaInfo.getLocalLagFromRemoteInBytes(),
1750+
remoteReplicaInfo.getReplicaId().getPartitionId().toString(), remoteReplicaInfo.getReplicaId());
1751+
}
17381752
}
17391753

17401754
/**

ambry-store/src/main/java/com/github/ambry/store/BlobStoreStats.java

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -147,10 +147,12 @@ class BlobStoreStats implements StoreStats, Closeable {
147147

148148
@Override
149149
public Pair<Long, Long> getValidSize(TimeRange timeRange) throws StoreException {
150-
long start = SystemTime.getInstance().milliseconds();
150+
long start = logger.isDebugEnabled() ? SystemTime.getInstance().milliseconds() : 0;
151151
Pair<Long, NavigableMap<LogSegmentName, Long>> logSegmentValidSizeResult = getValidDataSizeByLogSegment(timeRange);
152-
logger.debug("Time to getValidDataSizeByLogSegment on store {} : {} ms", storeId,
153-
SystemTime.getInstance().milliseconds() - start);
152+
if (logger.isDebugEnabled()) {
153+
logger.debug("Time to getValidDataSizeByLogSegment on store {} : {} ms", storeId,
154+
SystemTime.getInstance().milliseconds() - start);
155+
}
154156
Long totalValidSize = 0L;
155157
for (Long value : logSegmentValidSizeResult.getSecond().values()) {
156158
totalValidSize += value;

ambry-store/src/main/java/com/github/ambry/store/PersistentIndex.java

Lines changed: 36 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1544,7 +1544,9 @@ FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries) throws St
15441544
try {
15451545
Offset logEndOffsetBeforeFind = log.getEndOffset();
15461546
StoreFindToken storeToken = resetTokenIfRequired((StoreFindToken) token);
1547-
logger.trace("Time used to validate token: {}", (time.milliseconds() - startTimeInMs));
1547+
if (logger.isTraceEnabled()) {
1548+
logger.trace("Time used to validate token: {}", (time.milliseconds() - startTimeInMs));
1549+
}
15481550
ConcurrentSkipListMap<Offset, IndexSegment> indexSegments = validIndexSegments;
15491551
storeToken = revalidateStoreFindToken(storeToken, indexSegments, false);
15501552

@@ -1558,7 +1560,9 @@ FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries) throws St
15581560
logger.trace("Index : {} getting entries since {}", dataDir, offsetToStart);
15591561
// check journal
15601562
List<JournalEntry> entries = journal.getEntriesSince(offsetToStart, storeToken.getInclusive());
1561-
logger.trace("Journal based token, Time used to get entries: {}", (time.milliseconds() - startTimeInMs));
1563+
if (logger.isTraceEnabled()) {
1564+
logger.trace("Journal based token, Time used to get entries: {}", (time.milliseconds() - startTimeInMs));
1565+
}
15621566
// we deliberately obtain a snapshot of the index segments AFTER fetching from the journal. This ensures that
15631567
// any and all entries returned from the journal are guaranteed to be in the obtained snapshot of indexSegments.
15641568
indexSegments = validIndexSegments;
@@ -1583,14 +1587,18 @@ FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries) throws St
15831587
break;
15841588
}
15851589
}
1586-
logger.trace("Journal based token, Time used to generate message entries: {}",
1587-
(time.milliseconds() - startTimeInMs));
1590+
if (logger.isTraceEnabled()) {
1591+
logger.trace("Journal based token, Time used to generate message entries: {}",
1592+
(time.milliseconds() - startTimeInMs));
1593+
}
15881594

15891595
startTimeInMs = time.milliseconds();
15901596
logger.trace("Index : {} new offset from find info {}", dataDir, offsetEnd);
15911597
eliminateDuplicates(messageEntries);
1592-
logger.trace("Journal based token, Time used to eliminate duplicates: {}",
1593-
(time.milliseconds() - startTimeInMs));
1598+
if (logger.isTraceEnabled()) {
1599+
logger.trace("Journal based token, Time used to eliminate duplicates: {}",
1600+
(time.milliseconds() - startTimeInMs));
1601+
}
15941602
IndexSegment segmentOfToken = indexSegments.floorEntry(offsetEnd).getValue();
15951603
StoreFindToken storeFindToken =
15961604
new StoreFindToken(offsetEnd, sessionId, incarnationId, false, segmentOfToken.getResetKey(),
@@ -1616,20 +1624,26 @@ FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries) throws St
16161624
startTimeInMs = time.milliseconds();
16171625
newToken = findEntriesFromSegmentStartOffset(entry.getKey(), null, messageEntries,
16181626
new FindEntriesCondition(maxTotalSizeOfEntries), indexSegments, storeToken.getInclusive());
1619-
logger.trace("Journal based to segment based token, Time used to find entries: {}",
1620-
(time.milliseconds() - startTimeInMs));
1627+
if (logger.isTraceEnabled()) {
1628+
logger.trace("Journal based to segment based token, Time used to find entries: {}",
1629+
(time.milliseconds() - startTimeInMs));
1630+
}
16211631

16221632
startTimeInMs = time.milliseconds();
16231633
updateStateForMessages(messageEntries);
1624-
logger.trace("Journal based to segment based token, Time used to update state: {}",
1625-
(time.milliseconds() - startTimeInMs));
1634+
if (logger.isTraceEnabled()) {
1635+
logger.trace("Journal based to segment based token, Time used to update state: {}",
1636+
(time.milliseconds() - startTimeInMs));
1637+
}
16261638
} else {
16271639
newToken = storeToken;
16281640
}
16291641
startTimeInMs = time.milliseconds();
16301642
eliminateDuplicates(messageEntries);
1631-
logger.trace("Journal based to segment based token, Time used to eliminate duplicates: {}",
1632-
(time.milliseconds() - startTimeInMs));
1643+
if (logger.isTraceEnabled()) {
1644+
logger.trace("Journal based to segment based token, Time used to eliminate duplicates: {}",
1645+
(time.milliseconds() - startTimeInMs));
1646+
}
16331647
logger.trace("Index [{}]: new FindInfo [{}]", dataDir, newToken);
16341648
long totalBytesRead = getTotalBytesRead(newToken, messageEntries, logEndOffsetBeforeFind, indexSegments);
16351649
newToken.setBytesRead(totalBytesRead);
@@ -1650,16 +1664,22 @@ FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries) throws St
16501664
newToken = findEntriesFromSegmentStartOffset(storeToken.getOffset(), storeToken.getStoreKey(), messageEntries,
16511665
new FindEntriesCondition(maxTotalSizeOfEntries), indexSegments, storeToken.getInclusive());
16521666
}
1653-
logger.trace("Segment based token, Time used to find entries: {}", (time.milliseconds() - startTimeInMs));
1667+
if (logger.isTraceEnabled()) {
1668+
logger.trace("Segment based token, Time used to find entries: {}", (time.milliseconds() - startTimeInMs));
1669+
}
16541670

16551671
startTimeInMs = time.milliseconds();
16561672
updateStateForMessages(messageEntries);
1657-
logger.trace("Segment based token, Time used to update state: {}", (time.milliseconds() - startTimeInMs));
1673+
if (logger.isTraceEnabled()) {
1674+
logger.trace("Segment based token, Time used to update state: {}", (time.milliseconds() - startTimeInMs));
1675+
}
16581676

16591677
startTimeInMs = time.milliseconds();
16601678
eliminateDuplicates(messageEntries);
1661-
logger.trace("Segment based token, Time used to eliminate duplicates: {}",
1662-
(time.milliseconds() - startTimeInMs));
1679+
if (logger.isTraceEnabled()) {
1680+
logger.trace("Segment based token, Time used to eliminate duplicates: {}",
1681+
(time.milliseconds() - startTimeInMs));
1682+
}
16631683

16641684
long totalBytesRead = getTotalBytesRead(newToken, messageEntries, logEndOffsetBeforeFind, indexSegments);
16651685
newToken.setBytesRead(totalBytesRead);

0 commit comments

Comments
 (0)