Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import ch.qos.logback.classic.spi.ILoggingEvent;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Predicate;

Expand All @@ -11,22 +12,32 @@
*/
public class DefaultLoggingScope implements LoggingScope {

// note: not ignored and not queued means it should be printed
protected final Predicate<ILoggingEvent> queuePredicate;
protected final Predicate<ILoggingEvent> ignorePredicate;

protected final long timestamp = System.currentTimeMillis();

protected boolean failure = false;

protected ConcurrentLinkedQueue<ILoggingEvent> queue = new ConcurrentLinkedQueue<ILoggingEvent>();
protected final LoggingScopeFlushMode flushMode;

protected ConcurrentLinkedQueue<ILoggingEvent> events = new ConcurrentLinkedQueue<ILoggingEvent>();

public DefaultLoggingScope(Predicate<ILoggingEvent> queuePredicate, Predicate<ILoggingEvent> ignorePredicate) {
public DefaultLoggingScope(Predicate<ILoggingEvent> queuePredicate, Predicate<ILoggingEvent> ignorePredicate, LoggingScopeFlushMode flushMode) {
this.queuePredicate = queuePredicate;
this.ignorePredicate = ignorePredicate;
this.flushMode = flushMode;
}

@Override
public ConcurrentLinkedQueue<ILoggingEvent> getEvents() {
return queue;
public Queue<ILoggingEvent> getEvents() {
if(flushMode == LoggingScopeFlushMode.LAZY && !failure) {
// filter unwanted events
events.removeIf(queuePredicate);
}

return events;
}

@Override
Expand All @@ -35,10 +46,16 @@ public boolean append(ILoggingEvent eventObject) {
return true;
}

if(flushMode == LoggingScopeFlushMode.LAZY) {
// queue for later processing
events.add(eventObject);
return true;
}

if(!failure) {
if(queuePredicate.test(eventObject)) {
// log this event later or not at all
queue.add(eventObject);
events.add(eventObject);
return true;
}
}
Expand All @@ -53,11 +70,11 @@ public boolean isFailure() {

@Override
public void failure() {
// TODO flush buffer here?
this.failure = true;
}

public long getTimestamp() {
return timestamp;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import ch.qos.logback.classic.spi.ILoggingEvent;

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Predicate;

/**
Expand All @@ -13,8 +12,8 @@ public class LogLevelLoggingScope extends DefaultLoggingScope {

private final Predicate<ILoggingEvent> logLevelFailurePredicate;

public LogLevelLoggingScope(Predicate<ILoggingEvent> queuePredicate, Predicate<ILoggingEvent> ignorePredicate, Predicate<ILoggingEvent> logLevelFailurePredicate) {
super(queuePredicate, ignorePredicate);
public LogLevelLoggingScope(Predicate<ILoggingEvent> queuePredicate, Predicate<ILoggingEvent> ignorePredicate, Predicate<ILoggingEvent> logLevelFailurePredicate, LoggingScopeFlushMode flushMode) {
super(queuePredicate, ignorePredicate, flushMode);
this.logLevelFailurePredicate = logLevelFailurePredicate;
}

Expand All @@ -23,6 +22,16 @@ public boolean append(ILoggingEvent eventObject) {
return true;
}

if(flushMode == LoggingScopeFlushMode.LAZY) {
if(!failure && logLevelFailurePredicate.test(eventObject)) {
failure();
}

// queue for later processing
events.add(eventObject);
return true;
}

if(!failure) {
if(logLevelFailurePredicate.test(eventObject)) {
failure();
Expand All @@ -32,7 +41,7 @@ public boolean append(ILoggingEvent eventObject) {
}
if(queuePredicate.test(eventObject)) {
// log this event later or not at all
queue.add(eventObject);
events.add(eventObject);
return true;
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import ch.qos.logback.classic.spi.ILoggingEvent;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
Expand All @@ -11,7 +12,7 @@
*/
public interface LoggingScope {

ConcurrentLinkedQueue<ILoggingEvent> getEvents();
Queue<ILoggingEvent> getEvents();

boolean append(ILoggingEvent eventObject);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class LoggingScopeAsyncAppender extends MdcAsyncAppender implements LoggingScopeSink {
Expand Down Expand Up @@ -41,7 +42,7 @@ public LoggingScope getCurrentScope() {
}

public void write(LoggingScope scope) {
ConcurrentLinkedQueue<ILoggingEvent> events = scope.getEvents();
Queue<ILoggingEvent> events = scope.getEvents();
for (ILoggingEvent eventObject : events) {
postProcess(eventObject);
put(eventObject);
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
package no.entur.logging.cloud.appender.scope;

/**
*
* Configure when to flush log statements. Some log statements should always be written (e.g. error messages),
* so technically they do not need to be cached. But if other log statements end up being written as well,
* the log statements appear out of order even within a single request.<br><br>
*
* Log accumulation tools will normally sort the log statements by timestamp, so there the order does not matter.
* But for local development (e.g. printing to the console) logging out of order quickly gets very messy.
*
*/

public enum LoggingScopeFlushMode {

    /**
     *
     * Write each log statement as soon as it is determined that it should be logged.
     * May produce out-of-order log statements, but uses less memory.
     *
     */
    EAGER("eager"),

    /**
     *
     * Write log statements only when the on-demand scope closes, i.e. after all
     * log statements have been collected. Keeps log statements in order, but uses more memory.
     *
     */
    LAZY("lazy");

    // stable string identifier for this mode, e.g. for use in configuration values
    private final String id;

    LoggingScopeFlushMode(String identifier) {
        this.id = identifier;
    }

    /**
     * @return the stable string identifier of this flush mode
     */
    public String getId() {
        return id;
    }

}
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
*
* Interface for performing additional work on log event markers if they are to be logged (not dropped).
*
* This work will be performed on the same thread which collected the log events.
* This work will be performed on the same thread which collected the log events, or at least on a "worker" thread.
*
* In other words, this interface is intended to offload heavy work from the async logging thread.
*
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
package org.entur.example.web;

import no.entur.logging.cloud.logback.logstash.test.CompositeConsoleOutputControl;
import no.entur.logging.cloud.logback.logstash.test.CompositeConsoleOutputControlClosable;
import org.entur.example.web.rest.MyEntity;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.boot.test.web.server.LocalServerPort;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.test.context.TestPropertySource;

import static org.assertj.core.api.Assertions.assertThat;

/**
*
* Test additional logging due to a log statement with high log level.
*
*/

@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT)
@TestPropertySource(properties = {
"entur.logging.http.ondemand.enabled=true",
"entur.logging.http.ondemand.failure.http.enabled=false",
"entur.logging.http.ondemand.failure.logger.level=error",
"entur.logging.http.ondemand.flushMode=lazy",
})
public class OndemandWebLoggingHttpOkHighLogLevelLazyTest {

@LocalServerPort
private int randomServerPort;

@Autowired
private TestRestTemplate restTemplate;

// builds the request payload shared by all tests
private MyEntity createEntity() {
MyEntity entity = new MyEntity();
entity.setName("Entur");
entity.setSecret("mySecret");
return entity;
}

// posts the standard payload to the given endpoint and verifies a 200 OK response
private void postAndExpectOk(String url) {
ResponseEntity<MyEntity> response = restTemplate.postForEntity(url, createEntity(), MyEntity.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
}

@Test
public void useHumanReadablePlainEncoderExpectFullLoggingInOrder() {
postAndExpectOk("/api/document/some/method");
}

@Test
public void useHumanReadablePlainEncoderExpectReducedLoggingInOrder() {
postAndExpectOk("/api/document/some/method/infoLoggingOnly");
}

@Test
public void useHumanReadableJsonEncoderExpectFullLoggingInOrder() {
try (CompositeConsoleOutputControlClosable c = CompositeConsoleOutputControl.useHumanReadableJsonEncoder()) {
postAndExpectOk("/api/document/some/method");
}
}

@Test
public void useHumanReadableJsonEncoderExpectReducedLoggingInOrder() throws InterruptedException {
try (CompositeConsoleOutputControlClosable c = CompositeConsoleOutputControl.useHumanReadableJsonEncoder()) {
postAndExpectOk("/api/document/some/method/infoLoggingOnly");
}
}

@Test
public void useMachineReadableJsonEncoderExpectFullLoggingInOrder() {
try (CompositeConsoleOutputControlClosable c = CompositeConsoleOutputControl.useMachineReadableJsonEncoder()) {
postAndExpectOk("/api/document/some/method");
}
}

@Test
public void useMachineReadableJsonEncoderExpectReducedLoggingInOrder() throws InterruptedException {
try (CompositeConsoleOutputControlClosable c = CompositeConsoleOutputControl.useMachineReadableJsonEncoder()) {
postAndExpectOk("/api/document/some/method/infoLoggingOnly");
}
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ public GrpcLoggingScopeContextInterceptor grpcLoggingScopeContextInterceptor(Ond
}
}

GrpcContextLoggingScopeFactory grpcContextLoggingScopeFactory = new GrpcContextLoggingScopeFactory();
GrpcContextLoggingScopeFactory grpcContextLoggingScopeFactory = new GrpcContextLoggingScopeFactory(properties.getFlushMode());

GrpcLoggingScopeContextInterceptor interceptor = GrpcLoggingScopeContextInterceptor
.newBuilder()
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
package no.entur.logging.cloud.spring.ondemand.grpc.properties;

import no.entur.logging.cloud.appender.scope.LoggingScopeFlushMode;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.core.Ordered;

Expand All @@ -21,6 +22,8 @@ public class OndemandProperties {

private List<OndemandPath> paths = new ArrayList<>();

private LoggingScopeFlushMode flushMode = LoggingScopeFlushMode.EAGER;

public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
Expand Down Expand Up @@ -68,4 +71,12 @@ public int getInterceptorOrder() {
public void setInterceptorOrder(int interceptorOrder) {
this.interceptorOrder = interceptorOrder;
}

/**
 * @return when buffered log statements are flushed; defaults to {@code LoggingScopeFlushMode.EAGER}
 */
public LoggingScopeFlushMode getFlushMode() {
return flushMode;
}

/**
 * @param flushMode when buffered log statements should be flushed (eager or lazy)
 */
public void setFlushMode(LoggingScopeFlushMode flushMode) {
this.flushMode = flushMode;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,20 @@

public class GrpcContextLoggingScopeFactory implements LoggingScopeFactory<LoggingScope>, LoggingScopeProvider {

protected final LoggingScopeFlushMode flushMode;

public GrpcContextLoggingScopeFactory(LoggingScopeFlushMode flushMode) {
this.flushMode = flushMode;

}

@Override
public LoggingScope openScope(Predicate<ILoggingEvent> queuePredicate, Predicate<ILoggingEvent> ignorePredicate, Predicate<ILoggingEvent> logLevelFailurePredicate) {
LoggingScope scope;
if(logLevelFailurePredicate == null) {
scope = new DefaultLoggingScope(queuePredicate, ignorePredicate);
scope = new DefaultLoggingScope(queuePredicate, ignorePredicate, flushMode);
} else {
scope = new LogLevelLoggingScope(queuePredicate, ignorePredicate, logLevelFailurePredicate);
scope = new LogLevelLoggingScope(queuePredicate, ignorePredicate, logLevelFailurePredicate, flushMode);
}

return scope;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -113,13 +113,12 @@ public void close(Status status, Metadata trailers) {
try {
if (filter.getGrpcStatusPredicate().test(status)) {
// was there an error response
sink.write(scope);
} else if(scope.isFailure()) {
// there some dangerous error message
sink.write(scope);
scope.failure();
} else if(filter.hasFailureDuration() && System.currentTimeMillis() - scope. getTimestamp() > filter.getFailureDuration()) {
sink.write(scope);
scope.failure();
}

sink.write(scope);
} finally {
factory.closeScope(scope); // this is really a noop operation
}
Expand Down
Loading