Commit 21d74c0

[add] Included instant internal execution time and instant latency with RTT on reported metrics
1 parent: d5c160a

3 files changed: 22 additions, 6 deletions

Makefile
Lines changed: 2 additions & 2 deletions

@@ -8,8 +8,8 @@ GOGET=$(GOCMD) get
 GOMOD=$(GOCMD) mod
 GOFMT=$(GOCMD) fmt
 
-.PHONY: all test coverage build checkfmt lint fmt
-all: test coverage build checkfmt lint fmt
+.PHONY: all test coverage build checkfmt fmt
+all: test coverage build checkfmt fmt
 
 build:
 	$(GOBUILD) .

go.mod
Lines changed: 1 addition & 0 deletions

@@ -5,6 +5,7 @@ go 1.13
 require (
 	github.com/HdrHistogram/hdrhistogram-go v1.0.1
 	github.com/RedisGraph/redisgraph-go v1.0.1-0.20210122150500-aa0feaa960ce
+	github.com/golangci/golangci-lint v1.35.2 // indirect
 	github.com/gomodule/redigo v2.0.0+incompatible
 	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
 )

redisgraph-bechmark-go.go
Lines changed: 19 additions & 4 deletions

@@ -29,7 +29,9 @@ var totalRelationshipsCreated uint64
 var totalRelationshipsDeleted uint64
 
 var latencies *hdrhistogram.Histogram
+var instantLatencies *hdrhistogram.Histogram
 var graphRunTimeLatencies *hdrhistogram.Histogram
+var instantGraphRunTimeLatencies *hdrhistogram.Histogram
 
 const Inf = rate.Limit(math.MaxFloat64)
 
@@ -64,7 +66,11 @@ func sendCmdLogic(rg *redisgraph.Graph, query string, continueOnError bool, debu
 	} else {
 		err = graphRunTimeLatencies.RecordValue(int64(queryResult.InternalExecutionTime() * 1000.0))
 		if err != nil {
-			log.Fatalf("Received an error while recording RedisGraph RunTime latencies: %v", err)
+			log.Fatalf("Received an error while recording RedisGraph InternalExecutionTime latencies: %v", err)
+		}
+		err = instantGraphRunTimeLatencies.RecordValue(int64(queryResult.InternalExecutionTime() * 1000.0))
+		if err != nil {
+			log.Fatalf("Received an error while recording RedisGraph instant (last sec) InternalExecutionTime latencies: %v", err)
 		}
 		if debug_level > 1 {
 			fmt.Printf("Issued query: %s\n", query)
@@ -86,6 +92,10 @@ func sendCmdLogic(rg *redisgraph.Graph, query string, continueOnError bool, debu
 	if err != nil {
 		log.Fatalf("Received an error while recording latencies: %v", err)
 	}
+	err = instantLatencies.RecordValue(duration.Microseconds())
+	if err != nil {
+		log.Fatalf("Received an error while recording latencies: %v", err)
+	}
 }
 
 func main() {
@@ -118,7 +128,9 @@ func main() {
 	samplesPerClient := *numberRequests / *clients
 	client_update_tick := 1
 	latencies = hdrhistogram.New(1, 90000000, 3)
+	instantLatencies = hdrhistogram.New(1, 90000000, 3)
 	graphRunTimeLatencies = hdrhistogram.New(1, 90000000, 3)
+	instantGraphRunTimeLatencies = hdrhistogram.New(1, 90000000, 3)
 	connectionStr := fmt.Sprintf("%s:%d", *host, *port)
 	stopChan := make(chan struct{})
 	// a WaitGroup for the goroutines to tell us they've stopped
@@ -199,7 +211,7 @@ func updateCLI(tick *time.Ticker, c chan os.Signal, message_limit uint64, loop b
 	prevTime := time.Now()
 	prevMessageCount := uint64(0)
 	messageRateTs := []float64{}
-	fmt.Printf("%26s %7s %25s %25s %7s %25s %25s %25s\n", "Test time", " ", "Total Commands", "Total Errors", "", "Command Rate", "Client p50 with RTT(ms)", "Graph Internal p50 with RTT(ms)")
+	fmt.Printf("%26s %7s %25s %25s %7s %25s %25s %26s\n", "Test time", " ", "Total Commands", "Total Errors", "", "Command Rate", "Client p50 with RTT(ms)", "Graph Internal Time p50 (ms)")
 	for {
 		select {
 		case <-tick.C:
@@ -216,7 +228,10 @@ func updateCLI(tick *time.Ticker, c chan os.Signal, message_limit uint64, loop b
 
 			p50 := float64(latencies.ValueAtQuantile(50.0)) / 1000.0
 			p50RunTimeGraph := float64(graphRunTimeLatencies.ValueAtQuantile(50.0)) / 1000.0
-
+			instantP50 := float64(instantLatencies.ValueAtQuantile(50.0)) / 1000.0
+			instantP50RunTimeGraph := float64(instantGraphRunTimeLatencies.ValueAtQuantile(50.0)) / 1000.0
+			instantGraphRunTimeLatencies.Reset()
+			instantLatencies.Reset()
 			if prevMessageCount == 0 && totalCommands != 0 {
 				start = time.Now()
 			}
@@ -226,7 +241,7 @@ func updateCLI(tick *time.Ticker, c chan os.Signal, message_limit uint64, loop b
 			prevMessageCount = totalCommands
 			prevTime = now
 
-			fmt.Printf("%25.0fs %s %25d %25d [%3.1f%%] %25.2f %25.3f %25.3f\t", time.Since(start).Seconds(), completionPercentStr, totalCommands, totalErrors, errorPercent, messageRate, p50, p50RunTimeGraph)
+			fmt.Printf("%25.0fs %s %25d %25d [%3.1f%%] %25.2f %19.3f (%3.3f) %20.3f (%3.3f)\t", time.Since(start).Seconds(), completionPercentStr, totalCommands, totalErrors, errorPercent, messageRate, instantP50, p50, instantP50RunTimeGraph, p50RunTimeGraph)
 			fmt.Printf("\r")
 			if message_limit > 0 && totalCommands >= uint64(message_limit) && !loop {
 				return true, start, time.Since(start), totalCommands, messageRateTs
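
The commit follows a common pattern for per-interval ("instant") metrics: each sample is recorded into both a cumulative histogram and a second histogram that the reporting loop reads and then drains with Reset() on every tick, so the printed instant p50 reflects only the last interval while the cumulative p50 covers the whole run. Below is a minimal, self-contained sketch of that pattern with hdrhistogram-go; the names recordSample and report are illustrative rather than from the commit, and reporting is driven inline instead of from a separate goroutine to keep the sketch race-free (hdrhistogram.Histogram is not safe for concurrent use).

// Sketch: cumulative vs. instant latency histograms (illustrative names).
package main

import (
	"fmt"
	"time"

	hdrhistogram "github.com/HdrHistogram/hdrhistogram-go"
)

// Same bounds as the benchmark: 1 us to 90 s, 3 significant figures.
var latencies = hdrhistogram.New(1, 90000000, 3)        // whole run, never reset
var instantLatencies = hdrhistogram.New(1, 90000000, 3) // last interval only

// recordSample feeds one measured duration into both histograms.
func recordSample(d time.Duration) {
	if err := latencies.RecordValue(d.Microseconds()); err != nil {
		fmt.Println("record error:", err)
	}
	if err := instantLatencies.RecordValue(d.Microseconds()); err != nil {
		fmt.Println("record error:", err)
	}
}

// report prints the instant p50 next to the cumulative p50 (both in ms),
// then resets the instant histogram so the next interval starts from zero.
func report() {
	instantP50 := float64(instantLatencies.ValueAtQuantile(50.0)) / 1000.0
	p50 := float64(latencies.ValueAtQuantile(50.0)) / 1000.0
	fmt.Printf("p50 last interval: %.3f ms (overall: %.3f ms)\n", instantP50, p50)
	instantLatencies.Reset()
}

func main() {
	nextReport := time.Now().Add(time.Second)
	for i := 0; i < 50; i++ {
		recordSample(time.Duration(10+i) * time.Millisecond) // fake measurement
		if time.Now().After(nextReport) {
			report()
			nextReport = nextReport.Add(time.Second)
		}
		time.Sleep(50 * time.Millisecond)
	}
	report()
}

The split mirrors the trade-off in the commit: the cumulative histogram keeps full-run percentiles stable, while the reset-per-tick histogram makes a latency regression visible in the very next printed line instead of being averaged away.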
