@@ -5,6 +5,7 @@ package datastore
 
 import (
 	"context"
+	"fmt"
 	"log/slog"
 	"os"
 	"strconv"
@@ -993,8 +994,6 @@ func Test_FindWaitingMachine_NoConcurrentModificationErrors(t *testing.T) {
 			},
 		},
 	} {
-		initEntity := initEntity
-
 		err := sharedDS.createEntity(initEntity.table, initEntity.entity)
 		require.NoError(t, err)
 
@@ -1004,8 +1003,7 @@ func Test_FindWaitingMachine_NoConcurrentModificationErrors(t *testing.T) {
 		}()
 	}
 
-	for i := 0; i < 100; i++ {
-		i := i
+	for i := range 100 {
 		wg.Add(1)
 
 		log := root.With("worker", i)
@@ -1014,7 +1012,7 @@ func Test_FindWaitingMachine_NoConcurrentModificationErrors(t *testing.T) {
 			defer wg.Done()
 
 			for {
-				machine, err := sharedDS.FindWaitingMachine(context.Background(), "project", "partition", size, nil)
+				machine, err := sharedDS.FindWaitingMachine(context.Background(), "project", "partition", size, nil, metal.RoleMachine)
 				if err != nil {
 					if metal.IsConflict(err) {
 						t.Errorf("concurrent modification occurred, shared mutex is not working")
@@ -1061,3 +1059,121 @@ func Test_FindWaitingMachine_NoConcurrentModificationErrors(t *testing.T) {
 
 	assert.Equal(t, 100, count)
 }
+
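+// Test_FindWaitingMachine_RackSpreadingDistribution verifies that machine
+// allocations are spread evenly across the racks of a partition.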
+func Test_FindWaitingMachine_RackSpreadingDistribution(t *testing.T) {
+	var (
+		partitionID = "partition"
+		projectID   = "project"
+		size1       = metal.Size{Base: metal.Base{ID: "1"}}
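+		// fiveRacks maps an index to one of five rack IDs (rack-1 through rack-5)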
+		fiveRacks   = func(i int) string {
+			return "rack-" + strconv.FormatInt(int64((i%5)+1), 10)
+		}
+	)
+
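+	// wipe the machine and event tables when the test is done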
+	defer func() {
+		_, err := sharedDS.machineTable().Delete().RunWrite(sharedDS.session)
+		require.NoError(t, err)
+		_, err = sharedDS.eventTable().Delete().RunWrite(sharedDS.session)
+		require.NoError(t, err)
+	}()
+
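+	// create 200 waiting machines, distributed round-robin across the five racks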
+	for i := range 200 {
+		err := sharedDS.createEntity(sharedDS.machineTable(), &metal.Machine{
+			Base: metal.Base{
+				ID: strconv.Itoa(i),
+			},
+			PartitionID: partitionID,
+			SizeID:      size1.ID,
+			State: metal.MachineState{
+				Value: metal.AvailableState,
+			},
+			Waiting:      true,
+			PreAllocated: false,
+			RackID:       fiveRacks(i),
+		})
+		require.NoError(t, err)
+
+		err = sharedDS.createEntity(sharedDS.eventTable(), &metal.ProvisioningEventContainer{
+			Base: metal.Base{
+				ID: strconv.Itoa(i),
+			},
+			Liveliness: metal.MachineLivelinessAlive,
+		})
+		require.NoError(t, err)
+	}
+
+	// allocate some machines with different specs that should not influence the later allocations
+	for i, spec := range []struct {
+		role metal.Role
+		size string
+	}{
+		{role: metal.RoleFirewall, size: "firewall"},
+		{role: metal.RoleFirewall, size: "firewall"},
+		{role: metal.RoleMachine, size: "machine"},
+		{role: metal.RoleMachine, size: "machine"},
+		// uncomment to show that an allocation of the same size would affect the algorithm:
+		// {role: metal.RoleMachine, size: size1.ID},
+	} {
+		err := sharedDS.createEntity(sharedDS.machineTable(), &metal.Machine{
+			Base: metal.Base{
+				ID: "allocated-" + strconv.Itoa(i),
+			},
+			PartitionID: partitionID,
+			SizeID:      spec.size,
+			State: metal.MachineState{
+				Value: metal.AvailableState,
+			},
+			Allocation: &metal.MachineAllocation{
+				Project: projectID,
+				Role:    spec.role,
+			},
+			RackID: fiveRacks(i),
+		})
+		require.NoError(t, err)
+
+		err = sharedDS.createEntity(sharedDS.eventTable(), &metal.ProvisioningEventContainer{
+			Base: metal.Base{
+				ID: "allocated-" + strconv.Itoa(i),
+			},
+			Liveliness: metal.MachineLivelinessAlive,
+		})
+		require.NoError(t, err)
+	}
+
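+	// allocate 100 machines of size1; the spreading algorithm should prefer the
+	// rack with the fewest allocations, so the machines spread evenly across racks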
+	for range 100 {
+		machine, err := sharedDS.FindWaitingMachine(context.Background(), projectID, partitionID, size1, nil, metal.RoleMachine)
+		require.NoError(t, err)
+
+		newMachine := *machine
+		newMachine.PreAllocated = false
+		newMachine.Allocation = &metal.MachineAllocation{
+			Project: projectID,
+			Role:    metal.RoleMachine,
+		}
+		newMachine.SizeID = size1.ID
+
+		err = sharedDS.updateEntity(sharedDS.machineTable(), &newMachine, machine)
+		if err != nil {
+			t.Errorf("unable to update machine: %s", err)
+		}
+
+		t.Logf("machine %s allocated in %s", newMachine.ID, newMachine.RackID)
+	}
+
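+	// retrieve all machines allocated by this test and count them per rack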
+	var ms metal.Machines
+	err := sharedDS.SearchMachines(&MachineSearchQuery{AllocationProject: &projectID, SizeID: &size1.ID, PartitionID: &partitionID}, &ms)
+	require.NoError(t, err)
+
+	require.Len(t, ms, 100)
+
+	machinesByRack := map[string]int{}
+	for _, m := range ms {
+		machinesByRack[m.RackID]++
+	}
+
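+	// 100 allocations spread over 5 racks must yield exactly 20 machines per rack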
+	for id, count := range machinesByRack {
+		assert.Equal(t, 100/5, count, "uneven machine distribution in %s", id)
+	}
+
+	fmt.Println(machinesByRack)
+}