@@ -36,6 +36,7 @@ import (
3636 "github.com/prometheus/prometheus/tsdb"
3737 "go.opentelemetry.io/otel/attribute"
3838 "go.opentelemetry.io/otel/trace"
39+ "go.uber.org/atomic"
3940 "golang.org/x/exp/slices"
4041 "google.golang.org/grpc"
4142 "google.golang.org/grpc/codes"
@@ -141,6 +142,9 @@ type Handler struct {
141142 writeTimeseriesTotal * prometheus.HistogramVec
142143 writeE2eLatency * prometheus.HistogramVec
143144
145+ pendingWriteRequests prometheus.Gauge
146+ pendingWriteRequestsCounter atomic.Int32
147+
144148 Limiter * Limiter
145149}
146150
@@ -231,6 +235,12 @@ func NewHandler(logger log.Logger, o *Options) *Handler {
231235 Buckets : []float64 {1 , 5 , 10 , 20 , 30 , 40 , 50 , 60 , 90 , 120 , 300 , 600 , 900 , 1200 , 1800 , 3600 },
232236 }, []string {"code" , "tenant" , "rollup" },
233237 ),
238+ pendingWriteRequests : promauto .With (registerer ).NewGauge (
239+ prometheus.GaugeOpts {
240+ Name : "thanos_receive_pending_write_requests" ,
241+ Help : "The number of pending write requests." ,
242+ },
243+ ),
234244 }
235245
236246 h .forwardRequests .WithLabelValues (labelSuccess )
@@ -1076,6 +1086,9 @@ func (h *Handler) RemoteWrite(ctx context.Context, r *storepb.WriteRequest) (*st
10761086 span , ctx := tracing .StartSpan (ctx , "receive_grpc" )
10771087 defer span .Finish ()
10781088
1089+ h .pendingWriteRequests .Set (float64 (h .pendingWriteRequestsCounter .Add (1 )))
1090+ defer h .pendingWriteRequestsCounter .Add (- 1 )
1091+
10791092 _ , err := h .handleRequest (ctx , uint64 (r .Replica ), r .Tenant , & prompb.WriteRequest {Timeseries : r .Timeseries })
10801093 if err != nil {
10811094 level .Debug (h .logger ).Log ("msg" , "failed to handle request" , "err" , err )