Commit 83ce669

Remove old assertions and include new ones
1 parent e750f61 commit 83ce669

13 files changed (+73, -266 lines)

dd-java-agent/agent-debugger/src/main/java/com/datadog/debugger/agent/ConfigurationUpdater.java

Lines changed: 9 additions & 0 deletions

@@ -34,6 +34,7 @@
 import java.util.stream.Collectors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.antithesis.sdk.Assert;
 
 /**
  * Handles configuration updates if required by installing a new ClassFileTransformer and triggering
@@ -95,6 +96,8 @@ public void accept(Source source, Collection<? extends ProbeDefinition> definiti
       applyNewConfiguration(newConfiguration);
     } catch (RuntimeException e) {
       ExceptionHelper.logException(LOGGER, e, "Error during accepting new debugger configuration:");
+      LOGGER.debug("ANTITHESIS_ASSERT: ConfigurationUpdater.accept should sometimes throw a runtime exception (sometimes)");
+      Assert.sometimes("ConfigurationUpdater.accept should sometimes throw a runtime exception");
       throw e;
     }
   }
@@ -143,9 +146,15 @@ private void applyNewConfiguration(Configuration newConfiguration) {
       currentConfiguration = newConfiguration;
       if (changes.hasProbeRelatedChanges()) {
         LOGGER.debug("Applying new probe configuration, changes: {}", changes);
+        LOGGER.debug("ANTITHESIS_ASSERT: ConfigurationUpdater.handleProbesChanges should sometimes be called (sometimes)");
+        Assert.sometimes("ConfigurationUpdater.handleProbesChanges should sometimes be called");
         handleProbesChanges(changes, newConfiguration);
       }
+      LOGGER.debug("ANTITHESIS_ASSERT: ConfigurationUpdater.applyNewConfiguration should always be successful (always)");
+      Assert.always("ConfigurationUpdater.applyNewConfiguration should always be successful");
     } finally {
+      LOGGER.debug("ANTITHESIS_ASSERT: ConfigurationUpdater.applyNewConfiguration should always be reachable (reachable)");
+      Assert.reachable("ConfigurationUpdater.applyNewConfiguration should always be reachable");
       configurationLock.unlock();
     }
   }
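
Note: every addition in this commit follows the two-line pattern visible above — a log.debug marker prefixed with ANTITHESIS_ASSERT, immediately followed by the matching com.antithesis.sdk.Assert call. A minimal sketch of that pairing as a helper is shown below; the helper class and method are hypothetical (not part of this change), and it assumes the three-argument Assert.sometimes(condition, message, details) overload that appears in the code removed later in this commit.

```java
import com.antithesis.sdk.Assert;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical helper, not part of the commit: keeps the log marker and the SDK call in sync.
final class AntithesisAsserts {
  private static final Logger LOG = LoggerFactory.getLogger(AntithesisAsserts.class);

  private AntithesisAsserts() {}

  // Records that this call site was reached; passing `true` makes the "sometimes"
  // property equivalent to "this path is hit at least once across test runs".
  static void sometimesReached(String message) {
    LOG.debug("ANTITHESIS_ASSERT: {} (sometimes)", message);
    ObjectNode details = JsonNodeFactory.instance.objectNode();
    details.put("thread", Thread.currentThread().getName());
    Assert.sometimes(true, message, details);
  }
}
```

A call site such as the catch (RuntimeException e) block above would then read AntithesisAsserts.sometimesReached("ConfigurationUpdater.accept should sometimes throw a runtime exception"), so the marker string and the assertion message cannot drift apart.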

dd-java-agent/agent-profiling/profiling-controller-openjdk/src/main/java/com/datadog/profiling/controller/openjdk/OpenJdkController.java

Lines changed: 3 additions & 0 deletions

@@ -50,6 +50,7 @@
 import java.util.Map;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.antithesis.sdk.Assert;
 
 /**
  * This is the implementation of the controller for OpenJDK. It should work for JDK 11+ today, and
@@ -289,6 +290,8 @@ private static String getJfrRepositoryBase(ConfigProvider configProvider) {
       Files.createDirectories(repositoryPath);
     } catch (IOException e) {
       log.error("Failed to create JFR repository directory: {}", repositoryPath, e);
+      log.debug("ANTITHESIS_ASSERT: Failed to create JFR repository directory (unreachable)");
+      Assert.unreachable("Failed to create JFR repository directory");
       throw new IllegalStateException(
           "Failed to create JFR repository directory: " + repositoryPath, e);
     }

dd-java-agent/agent-profiling/profiling-controller/src/main/java/com/datadog/profiling/controller/ProfilingSystem.java

Lines changed: 11 additions & 1 deletion

@@ -35,6 +35,7 @@
 import java.util.concurrent.TimeUnit;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.antithesis.sdk.Assert;
 
 /** Sets up the profiling strategy and schedules the profiling recordings. */
 public final class ProfilingSystem {
@@ -196,9 +197,12 @@ private void startProfilingRecording() {
     if (t != null) {
       if (t instanceof IllegalStateException && "Shutdown in progress".equals(t.getMessage())) {
         ProfilerFlareLogger.getInstance().log("Shutdown in progress, cannot start profiling");
+        log.debug("ANTITHESIS_ASSERT: Shutdown in progress, cannot start profiling (sometimes)");
+        Assert.sometimes("Shutdown in progress, cannot start profiling");
       } else {
         ProfilerFlareLogger.getInstance().log("Failed to start profiling", t);
-
+        log.debug("ANTITHESIS_ASSERT: Failed to start profiling (unreachable)", t);
+        Assert.unreachable("Failed to start profiling");
         throw t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t);
       }
     }
@@ -275,13 +279,17 @@ public void snapshot(boolean onShutdown) {
         // the last recording end time plus one nano second. The reason for this is that when
         // JFR is filtering the stream it will only discard earlier chunks that have an end
         // time that is before (not before or equal to) the requested start time of the filter.
+        log.debug("ANTITHESIS_ASSERT: Snapshot created (always) - lastSnapshot != null: {}", (lastSnapshot != null));
+        Assert.always(lastSnapshot != null, "Snapshot created");
         lastSnapshot = recordingData.getEnd().plus(ONE_NANO);
         dataListener.onNewData(recordingType, recordingData, onShutdown);
       } else {
         lastSnapshot = Instant.now();
       }
     } catch (final Exception e) {
       log.error(SEND_TELEMETRY, "Exception in profiling thread, continuing", e);
+      log.debug("ANTITHESIS_ASSERT: Exception in profiling thread, continuing (unreachable)", e);
+      Assert.unreachable("Exception in profiling thread, continuing");
     } catch (final Throwable t) {
       /*
       Try to continue even after fatal exception. It seems to be useful to attempt to store profile when this happens.
@@ -294,6 +302,8 @@ public void snapshot(boolean onShutdown) {
       } catch (final Throwable t2) {
         // This should almost never happen and there is not much we can do here in cases like
         // OutOfMemoryError, so we will just ignore this.
+        log.debug("ANTITHESIS_ASSERT: Fatal exception in profiling thread, trying to continue (unreachable)");
+        Assert.unreachable("Fatal exception in profiling thread, trying to continue");
       }
     }
   }

dd-java-agent/agent-profiling/profiling-ddprof/src/main/java/com/datadog/profiling/ddprof/DatadogProfiler.java

Lines changed: 13 additions & 1 deletion

@@ -56,7 +56,7 @@
 import javax.annotation.Nullable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
+import com.antithesis.sdk.Assert;
 /**
  * It is currently assumed that this class can be initialised early so that Datadog profiler's
  * thread filter captures all tracing activity, which means it must not be modified to depend on
@@ -189,6 +189,8 @@ public OngoingRecording start() {
       return new DatadogProfilerRecording(this);
     } catch (IOException | IllegalStateException e) {
       log.debug("Failed to start Datadog profiler recording", e);
+      log.debug("ANTITHESIS_ASSERT: Failed to start Datadog profiler recording (unreachable)");
+      Assert.unreachable("Failed to start Datadog profiler recording");
       return null;
     }
   }
@@ -203,12 +205,16 @@ public RecordingData stop(OngoingRecording recording) {
   void stopProfiler() {
     if (recordingFlag.compareAndSet(true, false)) {
       profiler.stop();
+      log.debug("ANTITHESIS_ASSERT: Checking if profiling is still active after stop (sometimes) - active: {}", isActive());
+      Assert.sometimes(isActive(),"Profiling is still active. Waiting to stop.");
       if (isActive()) {
         log.debug("Profiling is still active. Waiting to stop.");
         while (isActive()) {
           LockSupport.parkNanos(10_000_000L);
         }
       }
+      log.debug("ANTITHESIS_ASSERT: Profiling should be stopped (always) - active: {}", isActive());
+      Assert.always(!isActive(),"Profiling is stopped");
     }
   }
 
@@ -222,6 +228,8 @@ public boolean isActive() {
       log.debug("Datadog Profiler Status = {}", status);
       return !status.contains("not active");
     } catch (IOException ignored) {
+      log.debug("ANTITHESIS_ASSERT: Failed to get Datadog profiler status (unreachable)");
+      Assert.unreachable("Failed to get Datadog profiler status");
     }
     return false;
   }
@@ -244,10 +252,14 @@ Path newRecording() throws IOException, IllegalStateException {
          log.warn("Unable to start Datadog profiler recording: {}", e.getMessage());
        }
        recordingFlag.set(false);
+       log.debug("ANTITHESIS_ASSERT: Unable to start Datadog profiler recording (unreachable)");
+       Assert.unreachable("Unable to start Datadog profiler recording");
        throw e;
      }
      return recFile;
    }
+   log.debug("ANTITHESIS_ASSERT: Datadog profiler session has already been started (unreachable)");
+   Assert.unreachable("Datadog profiler session has already been started");
    throw new IllegalStateException("Datadog profiler session has already been started");
  }
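
Note: stopProfiler() above encodes a state transition — Assert.sometimes(isActive(), ...) records that the profiler is occasionally still winding down right after profiler.stop(), and Assert.always(!isActive(), ...) requires that it has stopped once the wait loop exits. A condensed, illustrative sketch of the same pair is shown below, assuming the three-argument overloads used by the code removed later in this commit; the class, method, and detail fields are hypothetical and not part of the change.

```java
import com.antithesis.sdk.Assert;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.concurrent.locks.LockSupport;
import java.util.function.BooleanSupplier;

// Hypothetical sketch mirroring the stopProfiler() wait loop, with the observed
// state carried as assertion details instead of only in the debug log.
final class StopAssertSketch {
  private StopAssertSketch() {}

  static void awaitStopped(BooleanSupplier isActive) {
    boolean stillActive = isActive.getAsBoolean();
    ObjectNode afterStop = JsonNodeFactory.instance.objectNode();
    afterStop.put("active_after_stop", stillActive);
    Assert.sometimes(stillActive, "Profiling is still active. Waiting to stop.", afterStop);

    while (isActive.getAsBoolean()) {
      LockSupport.parkNanos(10_000_000L); // same 10 ms pause as the loop above
    }

    boolean activeAfterWait = isActive.getAsBoolean();
    ObjectNode afterWait = JsonNodeFactory.instance.objectNode();
    afterWait.put("active_after_wait", activeAfterWait);
    Assert.always(!activeAfterWait, "Profiling is stopped", afterWait);
  }
}
```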

dd-java-agent/agent-profiling/profiling-uploader/src/main/java/com/datadog/profiling/uploader/ProfileUploader.java

Lines changed: 5 additions & 0 deletions

@@ -69,6 +69,7 @@
 import okhttp3.ResponseBody;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.antithesis.sdk.Assert;
 
 /** The class for uploading profiles to the backend. */
 public final class ProfileUploader {
@@ -301,6 +302,8 @@ public void onFailure(final Call call, final IOException e) {
         // But, in any case, we have this safety-break in place to prevent blocking finishing the
         // sync request to a misbehaving server.
         if (handled.compareAndSet(false, true)) {
+          log.debug("ANTITHESIS_ASSERT: Upload timeout (unreachable)");
+          Assert.unreachable("Upload timeout");
           handleFailure(call, null, data, onCompletion);
         }
       }
@@ -351,6 +354,8 @@ private void handleResponse(
           "Failed to upload profile, it's too big. Dumping information about the profile");
       JfrCliHelper.invokeOn(data, ioLogger);
     } else {
+      log.debug("ANTITHESIS_ASSERT: Failed to upload profile (unreachable) - response code: {}", response.code());
+      Assert.unreachable("Failed to upload profile");
       ioLogger.error("Failed to upload profile", getLoggerResponse(response));
     }
   }

dd-java-agent/agent-profiling/src/main/java/com/datadog/profiling/agent/ProfilingAgent.java

Lines changed: 7 additions & 0 deletions

@@ -37,6 +37,7 @@
 import java.util.regex.Pattern;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.antithesis.sdk.Assert;
 
 /** Profiling agent implementation */
 public class ProfilingAgent {
@@ -81,6 +82,8 @@ public void onNewData(RecordingType type, RecordingData data, boolean handleSync
           log.debug("Debug profile stored as {}", tmp);
         } catch (IOException e) {
           log.debug("Unable to write debug profile dump", e);
+          log.debug("ANTITHESIS_ASSERT: Unable to write debug profile dump (unreachable)");
+          Assert.unreachable("Unable to write debug profile dump");
         }
       }
     }
@@ -169,11 +172,15 @@ public static synchronized boolean run(final boolean earlyStart, Instrumentation
         This means that if/when we implement functionality to manually shutdown profiler we would
         need to not forget to add code that removes this shutdown hook from JVM.
         */
+        log.debug("ANTITHESIS_ASSERT: Shutdown hook added (always) - uploader != null: {}", (uploader != null));
+        Assert.always(uploader!= null, "Shutdown hook added");
         Runtime.getRuntime().addShutdownHook(new ShutdownHook(profiler, uploader));
       } catch (final IllegalStateException ex) {
         // The JVM is already shutting down.
       }
     } catch (final UnsupportedEnvironmentException | ConfigurationException e) {
+      log.debug("ANTITHESIS_ASSERT: Failed to initialize profiling agent (unreachable)", e);
+      Assert.unreachable("Failed to initialize profiling agent!");
       ProfilerFlareLogger.getInstance().log("Failed to initialize profiling agent!", e);
       ProfilerFlareReporter.reportInitializationException(e);
     }

dd-trace-core/src/main/java/datadog/trace/common/writer/PayloadDispatcherImpl.java

Lines changed: 0 additions & 55 deletions

@@ -1,8 +1,5 @@
 package datadog.trace.common.writer;
 
-import com.antithesis.sdk.Assert;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
-import com.fasterxml.jackson.databind.node.ObjectNode;
 import datadog.communication.monitor.Monitoring;
 import datadog.communication.monitor.Recording;
 import datadog.communication.serialization.ByteBufferConsumer;
@@ -60,17 +57,6 @@ public Collection<RemoteApi> getApis() {
 
   @Override
   public void onDroppedTrace(int spanCount) {
-    // Antithesis: Assert that traces should not be dropped before sending
-    ObjectNode dropDetails = JsonNodeFactory.instance.objectNode();
-    dropDetails.put("span_count", spanCount);
-    dropDetails.put("total_dropped_traces", droppedTraceCount.sum() + 1);
-    dropDetails.put("total_dropped_spans", droppedSpanCount.sum() + spanCount);
-
-    log.debug("ANTITHESIS_ASSERT: Traces dropped before sending (unreachable) - span_count: {}, total_dropped: {}", spanCount, droppedTraceCount.sum() + 1);
-    Assert.unreachable(
-        "Traces should not be dropped before attempting to send - indicates buffer overflow or backpressure",
-        dropDetails);
-
     droppedSpanCount.add(spanCount);
     droppedTraceCount.increment();
   }
@@ -117,60 +103,19 @@ public void accept(int messageCount, ByteBuffer buffer) {
     // the packer calls this when the buffer is full,
     // or when the packer is flushed at a heartbeat
     if (messageCount > 0) {
-      // Antithesis: Verify that we're attempting to send traces
-      log.debug("ANTITHESIS_ASSERT: Trace sending code path exercised (reachable) - message_count: {}", messageCount);
-      Assert.reachable("Trace sending code path is exercised", null);
-      log.debug("ANTITHESIS_ASSERT: Checking if traces are being sent to API (sometimes) - message_count: {}", messageCount);
-      Assert.sometimes(
-          messageCount > 0,
-          "Traces are being sent to the API",
-          null);
-
       batchTimer.reset();
       Payload payload = newPayload(messageCount, buffer);
       final int sizeInBytes = payload.sizeInBytes();
       healthMetrics.onSerialize(sizeInBytes);
       RemoteApi.Response response = api.sendSerializedTraces(payload);
       mapper.reset();
 
-      // Antithesis: Assert that trace sending should always succeed
-      ObjectNode sendDetails = JsonNodeFactory.instance.objectNode();
-      sendDetails.put("trace_count", messageCount);
-      sendDetails.put("payload_size_bytes", sizeInBytes);
-      sendDetails.put("success", response.success());
-      response.exception().ifPresent(ex -> {
-        sendDetails.put("exception", ex.getClass().getName());
-        sendDetails.put("exception_message", ex.getMessage());
-      });
-      response.status().ifPresent(status -> sendDetails.put("http_status", status));
-
-      log.debug("ANTITHESIS_ASSERT: Checking trace sending success (always) - success: {}, trace_count: {}", response.success(), messageCount);
-      Assert.always(
-          response.success(),
-          "Trace sending to API should always succeed - no traces should be lost",
-          sendDetails);
-
       if (response.success()) {
         if (log.isDebugEnabled()) {
           log.debug("Successfully sent {} traces to the API", messageCount);
        }
        healthMetrics.onSend(messageCount, sizeInBytes, response);
      } else {
-        // Antithesis: This code path should be unreachable if traces are never lost
-        ObjectNode failureDetails = JsonNodeFactory.instance.objectNode();
-        failureDetails.put("trace_count", messageCount);
-        failureDetails.put("payload_size_bytes", sizeInBytes);
-        response.exception().ifPresent(ex -> {
-          failureDetails.put("exception", ex.getClass().getName());
-          failureDetails.put("exception_message", ex.getMessage());
-        });
-        response.status().ifPresent(status -> failureDetails.put("http_status", status));
-
-        log.debug("ANTITHESIS_ASSERT: Trace sending failed (unreachable) - trace_count: {}, size: {} bytes", messageCount, sizeInBytes);
-        Assert.unreachable(
-            "Trace sending failure path should never be reached - indicates traces are being lost",
-            failureDetails);
-
        if (log.isDebugEnabled()) {
          log.debug(
              "Failed to send {} traces of size {} bytes to the API", messageCount, sizeInBytes);

dd-trace-core/src/main/java/datadog/trace/common/writer/RemoteWriter.java

Lines changed: 0 additions & 39 deletions

@@ -3,9 +3,6 @@
 import static datadog.trace.api.sampling.PrioritySampling.UNSET;
 import static java.util.concurrent.TimeUnit.MINUTES;
 
-import com.antithesis.sdk.Assert;
-import com.fasterxml.jackson.databind.node.JsonNodeFactory;
-import com.fasterxml.jackson.databind.node.ObjectNode;
 import datadog.trace.core.DDSpan;
 import datadog.trace.core.monitor.HealthMetrics;
 import datadog.trace.relocate.api.RatelimitedLogger;
@@ -70,33 +67,9 @@ protected RemoteWriter(
 
   @Override
   public void write(final List<DDSpan> trace) {
-    // Antithesis: Assert that we should never attempt to write when writer is closed
-    ObjectNode writeAttemptDetails = JsonNodeFactory.instance.objectNode();
-    writeAttemptDetails.put("writer_closed", closed);
-    writeAttemptDetails.put("trace_size", trace.size());
-    writeAttemptDetails.put("has_traces", !trace.isEmpty());
-
-    log.debug("ANTITHESIS_ASSERT: Checking writer not closed when writing (always) - closed: {}, trace_size: {}", closed, trace.size());
-    Assert.always(
-        !closed,
-        "Writer should never be closed when attempting to write traces",
-        writeAttemptDetails);
-
     if (closed) {
       // We can't add events after shutdown otherwise it will never complete shutting down.
       log.debug("Dropped due to shutdown: {}", trace);
-
-      // Antithesis: Track when traces are dropped due to writer being closed
-      ObjectNode shutdownDetails = JsonNodeFactory.instance.objectNode();
-      shutdownDetails.put("trace_size", trace.size());
-      shutdownDetails.put("reason", "writer_closed_during_shutdown");
-
-      log.debug("ANTITHESIS_ASSERT: Traces dropped due to shutdown (sometimes) - closed: {}, trace_size: {}", closed, trace.size());
-      Assert.sometimes(
-          closed && !trace.isEmpty(),
-          "Traces are dropped due to writer shutdown - tracking shutdown behavior",
-          shutdownDetails);
-
       handleDroppedTrace(trace);
     } else {
       if (trace.isEmpty()) {
@@ -118,18 +91,6 @@ public void write(final List<DDSpan> trace) {
           handleDroppedTrace(trace);
           break;
         case DROPPED_BUFFER_OVERFLOW:
-          // Antithesis: Buffer overflow should NEVER happen - this indicates a serious problem
-          ObjectNode overflowDetails = JsonNodeFactory.instance.objectNode();
-          overflowDetails.put("trace_size", trace.size());
-          overflowDetails.put("sampling_priority", samplingPriority);
-          overflowDetails.put("buffer_capacity", traceProcessingWorker.getCapacity());
-          overflowDetails.put("reason", "buffer_overflow_backpressure");
-
-          log.debug("ANTITHESIS_ASSERT: Buffer overflow occurred (unreachable) - trace_size: {}, capacity: {}", trace.size(), traceProcessingWorker.getCapacity());
-          Assert.unreachable(
-              "Buffer overflow should never occur - traces are being dropped due to backpressure",
-              overflowDetails);
-
           if (log.isDebugEnabled()) {
             log.debug("Dropped due to a buffer overflow: {}", trace);
           } else {
