// See the License for the specific language governing permissions and
// limitations under the License.

+//go:build !cluster_proxy
+
package e2e

import (
	"context"
+	"encoding/json"
	"testing"
+	"time"

	"github.com/stretchr/testify/require"

+	"go.etcd.io/bbolt"
+	"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+	"go.etcd.io/etcd/server/v3/storage/datadir"
+	"go.etcd.io/etcd/server/v3/storage/schema"
	"go.etcd.io/etcd/tests/v3/framework/config"
	"go.etcd.io/etcd/tests/v3/framework/e2e"
)

@@ -70,3 +78,63 @@ func TestForceNewCluster(t *testing.T) {
		})
	}
}
+
+func TestForceNewCluster_MemberCount(t *testing.T) {
+	e2e.BeforeTest(t)
+
+	ctx := context.Background()
+
+	epc, promotedMembers := mustCreateNewClusterByPromotingMembers(t, e2e.CurrentVersion, 3, e2e.WithKeepDataDir(true))
+	require.Len(t, promotedMembers, 2)
+
+	// Wait for the backend transaction to commit the data to disk, so
+	// that the consistent index is persisted. Alternatively, issue a
+	// snapshot command to force a backend commit.
+	time.Sleep(time.Second)
+
+	t.Log("Killing all the members")
+	require.NoError(t, epc.Kill())
+	require.NoError(t, epc.Wait(ctx))
+
+	m := epc.Procs[0]
+	t.Logf("Forcibly creating a one-member cluster with member: %s", m.Config().Name)
+	m.Config().Args = append(m.Config().Args, "--force-new-cluster")
+	require.NoError(t, m.Start(ctx))
+
+	t.Log("Checking the member count online via MemberList")
+	mresp, merr := m.Etcdctl().MemberList(ctx, false)
+	require.NoError(t, merr)
+	require.Len(t, mresp.Members, 1)
+
+	t.Log("Closing the member")
+	require.NoError(t, m.Close())
+	require.NoError(t, m.Wait(ctx))
+
+	t.Log("Checking the member count offline by reading the bolt DB")
+	members := mustReadMembersFromBoltDB(t, m.Config().DataDirPath)
+	require.Len(t, members, 1)
+}
+
+func mustReadMembersFromBoltDB(t *testing.T, dataDir string) []*membership.Member {
+	dbPath := datadir.ToBackendFileName(dataDir)
+	db, err := bbolt.Open(dbPath, 0o400, &bbolt.Options{ReadOnly: true})
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, db.Close())
+	}()
+
+	var members []*membership.Member
+	err = db.View(func(tx *bbolt.Tx) error {
+		b := tx.Bucket(schema.Members.Name())
+		require.NotNil(t, b)
+		return b.ForEach(func(k, v []byte) error {
+			m := membership.Member{}
+			require.NoError(t, json.Unmarshal(v, &m))
+			members = append(members, &m)
+			return nil
+		})
+	})
+	require.NoError(t, err)
+
+	return members
+}
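
As a side note for manual debugging, the same offline check can be done without the etcd test framework. Below is a minimal standalone sketch, assuming the standard etcd v3 data-directory layout where the bolt backend lives at `<data-dir>/member/snap/db` (which is what `datadir.ToBackendFileName` resolves to) and members are stored as JSON values in the `members` bucket (what `schema.Members.Name()` returns). Run it only against a stopped member, since a running etcd holds a lock on the backend file.

```go
// membersdump prints the members recorded in an etcd backend bolt file.
// Usage: go run membersdump.go /path/to/data-dir
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"go.etcd.io/bbolt"
)

func main() {
	// Standard etcd v3 layout: the backend bolt file sits under member/snap/db.
	dbPath := filepath.Join(os.Args[1], "member", "snap", "db")

	db, err := bbolt.Open(dbPath, 0o400, &bbolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatalf("open %s: %v", dbPath, err)
	}
	defer db.Close()

	err = db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte("members"))
		if b == nil {
			return fmt.Errorf("no members bucket in %s", dbPath)
		}
		return b.ForEach(func(k, v []byte) error {
			// Keys are member IDs; values are the JSON-encoded members.
			var m map[string]any
			if err := json.Unmarshal(v, &m); err != nil {
				return err
			}
			fmt.Printf("member %s: %v\n", string(k), m)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Against a data directory that went through `--force-new-cluster` as in the test above, this should print exactly one member.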