-
Notifications
You must be signed in to change notification settings - Fork 27
Fix multi node test #572
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Fix multi node test #572
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,7 +19,6 @@ | |
| "strconv" | ||
| "strings" | ||
| "sync" | ||
| "sync/atomic" | ||
| "testing" | ||
| "time" | ||
|
|
||
|
|
@@ -130,6 +129,7 @@ | |
| // Log format: [TestName:serviceName] LEVEL: message | ||
| // This provides consistent formatting and ensures all logs are captured by go test. | ||
| UseUnifiedLogger bool | ||
| SettingsContext string | ||
| } | ||
|
|
||
| // JSONError represents a JSON error response from the RPC server. | ||
|
|
@@ -155,13 +155,18 @@ | |
| appSettings *settings.Settings | ||
| ) | ||
|
|
||
| appSettings = settings.NewSettings() // This reads gocore.Config and applies sensible defaults | ||
| if opts.SettingsContext != "" { | ||
| appSettings = settings.NewSettings(opts.SettingsContext) // This reads gocore.Config and applies sensible defaults | ||
| } else { | ||
| appSettings = settings.NewSettings() // This reads gocore.Config and applies sensible defaults | ||
| } | ||
|
|
||
|
|
||
| // Generate a unique context for this TestDaemon to ensure util.GetListener | ||
| // creates unique listeners instead of returning cached ones from another TestDaemon. | ||
| // The counter ensures uniqueness even when tests run in quick succession. | ||
| uniqueID := atomic.AddUint64(&testDaemonCounter, 1) | ||
| appSettings.Context = fmt.Sprintf("%s-testdaemon-%d", appSettings.Context, uniqueID) | ||
| // uniqueID := atomic.AddUint64(&testDaemonCounter, 1) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. These commented-out lines modify `appSettings.Context` to give each TestDaemon a unique context. Without this code, multiple TestDaemons running concurrently might share cached listeners from `util.GetListener`. Was this intentionally removed? If so, how is test isolation now ensured? |
||
| // appSettings.Context = fmt.Sprintf("%s-testdaemon-%d", appSettings.Context, uniqueID) | ||
|
|
||
| var ( | ||
| listenAddr string | ||
|
|
@@ -246,18 +251,19 @@ | |
| appSettings.Validator.HTTPAddress, _ = url.Parse(clientAddr) | ||
|
|
||
| // P2P - allocate port for libp2p (doesn't support pre-created listeners) | ||
| p2pPort, err := getFreePort() | ||
| require.NoError(t, err) | ||
| appSettings.P2P.StaticPeers = nil | ||
| appSettings.P2P.ListenAddresses = []string{"0.0.0.0"} | ||
| appSettings.P2P.Port = p2pPort | ||
|
|
||
|
|
||
| // P2P gRPC | ||
| // Just set the port from the settings override function | ||
| if opts.EnableP2P { | ||
| var tmpSettings settings.Settings | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This code creates a temporary `settings.Settings` and calls `opts.SettingsOverrideFunc(&tmpSettings)` on it just to extract the P2P port, even though the same override function is applied to the real `appSettings` again later.
Consider either:
|
||
| opts.SettingsOverrideFunc(&tmpSettings) | ||
| // P2P gRPC uses a random port — the specified port is for libp2p | ||
| _, listenAddr, clientAddr, err = util.GetListener(appSettings.Context, "p2p", "", ":0") | ||
| require.NoError(t, err) | ||
| appSettings.P2P.GRPCListenAddress = listenAddr | ||
| appSettings.P2P.GRPCAddress = clientAddr | ||
| appSettings.P2P.Port = tmpSettings.P2P.Port | ||
| appSettings.P2P.ListenAddresses = []string{"0.0.0.0"} | ||
| } | ||
|
|
||
| if opts.EnableBlockPersister { | ||
|
|
@@ -309,7 +315,7 @@ | |
| // Create a unique data directory per test | ||
| // Use test name to ensure each test has its own isolated directory | ||
| testName := strings.ReplaceAll(t.Name(), "/", "_") | ||
| appSettings.ClientName = testName | ||
| // appSettings.ClientName = testName | ||
|
|
||
| // Determine the data directory path | ||
| var path string | ||
|
|
@@ -389,6 +395,8 @@ | |
| opts.SettingsOverrideFunc(appSettings) | ||
| } | ||
|
|
||
|
|
||
|
|
||
| // Initialize container manager for UTXO store if UTXOStoreType is specified | ||
| var containerManager *containers.ContainerManager | ||
| if opts.ContainerManager != nil { | ||
|
|
@@ -789,8 +797,8 @@ | |
| return result | ||
| } | ||
|
|
||
| // getFreePort asks the kernel for a free open port that is ready to use. | ||
| func getFreePort() (int, error) { | ||
| // GetFreePort asks the kernel for a free open port that is ready to use. | ||
| func GetFreePort() (int, error) { | ||
| addr, err := net.ResolveTCPAddr("tcp", "localhost:0") | ||
| if err != nil { | ||
| return 0, err | ||
|
|
@@ -1115,9 +1123,9 @@ | |
| // With GlobalBlockHeightRetention=1, transactions older than 1 block should be pruned. | ||
| // Current height is 10, so we wait for the pruner to complete its cycle. | ||
| t.Logf("Waiting for pruner to complete pruning cycle...") | ||
| prunedHeight, recordsProcessed, err := td.prunerObserver.waitForPrune(timeout) | ||
| prunedHeight, recordsProcessed, clientName, err := td.prunerObserver.waitForPrune(timeout) | ||
| require.NoError(t, err, "Timeout waiting for pruner to complete") | ||
| t.Logf("✓ Pruner completed pruning up to height %d, processed %d records", prunedHeight, recordsProcessed) | ||
| t.Logf("✓ Pruner completed pruning up to height %d, processed %d records, client: %s", prunedHeight, recordsProcessed, clientName) | ||
| } | ||
|
|
||
| // WaitForBlobDeletion waits for the blob deletion worker to complete processing. | ||
|
|
@@ -2240,6 +2248,7 @@ | |
| type pruneEvent struct { | ||
| height uint32 | ||
| recordsProcessed int64 | ||
| clientName string | ||
| } | ||
|
|
||
| type testPrunerObserver struct { | ||
|
|
@@ -2254,21 +2263,21 @@ | |
| } | ||
| } | ||
|
|
||
| func (o *testPrunerObserver) OnPruneComplete(height uint32, recordsProcessed int64) { | ||
| o.t.Logf("✓ Pruner callback invoked for height %d with %d records processed", height, recordsProcessed) | ||
| func (o *testPrunerObserver) OnPruneComplete(height uint32, recordsProcessed int64, clientName string) { | ||
| o.t.Logf("✓ Pruner callback invoked for height %d with %d records processed, client: %s", height, recordsProcessed, clientName) | ||
| select { | ||
| case o.pruneCompleted <- pruneEvent{height: height, recordsProcessed: recordsProcessed}: | ||
| case o.pruneCompleted <- pruneEvent{height: height, recordsProcessed: recordsProcessed, clientName: clientName}: | ||
| default: | ||
| o.t.Logf("Warning: pruneCompleted channel is full, dropping event for height %d", height) | ||
| } | ||
| } | ||
|
|
||
| func (o *testPrunerObserver) waitForPrune(timeout time.Duration) (uint32, int64, error) { | ||
| func (o *testPrunerObserver) waitForPrune(timeout time.Duration) (uint32, int64, string, error) { | ||
| select { | ||
| case event := <-o.pruneCompleted: | ||
| return event.height, event.recordsProcessed, nil | ||
| return event.height, event.recordsProcessed, event.clientName, nil | ||
| case <-time.After(timeout): | ||
| return 0, 0, errors.NewProcessingError("timeout waiting for prune completion") | ||
| return 0, 0, "", errors.NewProcessingError("timeout waiting for prune completion") | ||
| } | ||
| } | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Using
`url.URL` as a map key is problematic. URLs are structs with pointers and slices internally, which means two URLs with identical string representations may not be equal as map keys. This could lead to cache misses where the same URL creates multiple store instances. Consider using the URL string representation as the map key instead: