
Commit 2900e34

Moves to Lagrange interpolation and adds an HTTP-based demo
1 parent f755d00 commit 2900e34

22 files changed, +2423 -365 lines

blind-auction/auction.go

Lines changed: 2 additions & 2 deletions

@@ -37,11 +37,11 @@ func AuctionDataFromChunk(chunk [IBFChunkSize]byte) *AuctionData {
 
 // AuctionDataFromMessage creates auction data binding a message to its bid weight.
 // Used by clients when preparing bids for the next round.
-func AuctionDataFromMessage(msg []byte, weight uint32) *AuctionData {
+func AuctionDataFromMessage(msg []byte, weight uint32, bytesToElement int) *AuctionData {
     return &AuctionData{
         MessageHash: sha256.Sum256(msg),
         Weight:      weight,
-        Size:        uint32(len(msg)), // Size in bytes for bandwidth allocation
+        Size:        uint32((len(msg) + bytesToElement - 1) / bytesToElement), // Size in bytes for bandwidth allocation
     }
 }
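
With this change, Size counts field elements rather than raw bytes: the ceiling division rounds a message of len(msg) bytes up to the number of bytesToElement-sized elements it occupies (the inline comment still says "bytes", but the arithmetic is per element). A minimal standalone sketch of that computation; the helper name and the 64-byte element size are illustrative, not taken from the repository:

package main

import "fmt"

// elementsNeeded mirrors the updated Size computation: the number of field
// elements required to hold n message bytes when each element packs
// bytesToElement bytes, i.e. a ceiling division.
func elementsNeeded(n, bytesToElement int) uint32 {
    return uint32((n + bytesToElement - 1) / bytesToElement)
}

func main() {
    fmt.Println(elementsNeeded(100, 64)) // 2: 100 bytes spill into a second element
    fmt.Println(elementsNeeded(128, 64)) // 2: exact multiples need no extra element
    fmt.Println(elementsNeeded(1, 64))   // 1: even a single byte occupies one element
}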

blind-auction/ibf.go

Lines changed: 15 additions & 18 deletions

@@ -3,7 +3,6 @@ package blind_auction
 import (
     "crypto/sha256"
     "encoding/binary"
-    "encoding/hex"
     "errors"
     "fmt"
     "math/big"

@@ -39,7 +38,7 @@ func IBFVectorSize(nBuckets uint32) uint32 {
 // IBFVector implements a multi-level Invertible Bloom Filter for distributed auction scheduling.
 // The IBF is secret-shared across servers and reconstructed after threshold decryption.
 type IBFVector struct {
-    Chunks   [IBFNChunks][][IBFChunkSize]byte
+    Chunks   [IBFNChunks][]big.Int
     Counters [IBFNChunks][]uint64
 }
 

@@ -49,7 +48,7 @@ func (v *IBFVector) String() string {
     for level := range v.Chunks {
         res += fmt.Sprintf("L%d: ", level)
         for chunk := range v.Chunks[level] {
-            res += hex.EncodeToString(v.Chunks[level][chunk][:])
+            res += v.Chunks[level][chunk].String()
             res += fmt.Sprintf(" (%d)", v.Counters[level][chunk])
             res += "\n"
         }

@@ -65,7 +64,7 @@ func NewIBFVector(messageSlots uint32) *IBFVector {
     fac := 1.0
     for level := range res.Chunks {
         slotsInLevel := int(float64(messageSlots) * fac)
-        res.Chunks[level] = make([][IBFChunkSize]byte, slotsInLevel)
+        res.Chunks[level] = make([]big.Int, slotsInLevel)
         res.Counters[level] = make([]uint64, slotsInLevel)
         fac *= IBFShrinkFactor
     }

@@ -91,9 +90,7 @@ func (v *IBFVector) InsertChunk(msg [IBFChunkSize]byte) {
     for level := 0; level < IBFNChunks; level++ {
         index := ChunkIndex(msg, level, len(v.Chunks[level]))
 
-        currentEl := ChunkToElement(v.Chunks[level][index])
-        crypto.FieldAddInplace(currentEl, msgAsEl, crypto.AuctionFieldOrder)
-        v.Chunks[level][index] = ElementToChunk(currentEl)
+        crypto.FieldAddInplace(&v.Chunks[level][index], msgAsEl, crypto.AuctionFieldOrder)
         v.Counters[level][index] += 1
     }
 }

@@ -104,7 +101,7 @@ func (v *IBFVector) EncodeAsFieldElements() []*big.Int {
     res := []*big.Int{}
     for level := range v.Chunks {
         for chunk := range v.Chunks[level] {
-            res = append(res, ChunkToElement(v.Chunks[level][chunk]))
+            res = append(res, new(big.Int).Set(&v.Chunks[level][chunk]))
         }
     }
 

@@ -121,7 +118,7 @@ func (v *IBFVector) DecodeFromElements(elements []*big.Int) *IBFVector {
     index := uint32(0)
     for level := range v.Chunks {
         for chunk := range v.Chunks[level] {
-            v.Chunks[level][chunk] = ElementToChunk(elements[index])
+            v.Chunks[level][chunk].Set(elements[index])
             index += 1
         }
     }

@@ -138,6 +135,7 @@ func (v *IBFVector) DecodeFromElements(elements []*big.Int) *IBFVector {
 
 // ChunkIndex computes the bucket index for a chunk at a specific IBF level.
 func ChunkIndex(chunk [IBFChunkSize]byte, level int, itemsInLevel int) uint64 {
+    // TODO: this is really not very fast
     dataToHash := append([]byte(fmt.Sprintf("%d", level)), chunk[:]...)
     innerIndexSeed := sha256.Sum256(dataToHash)
     return uint64(binary.BigEndian.Uint64(innerIndexSeed[0:8])) % uint64(itemsInLevel)
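
The bucket index is derived the same way at every level: hash the level number (as a decimal string) concatenated with the chunk, then reduce the first eight bytes of the SHA-256 digest modulo the number of buckets in that level. A self-contained sketch of that recipe; the fixed 64-byte chunk and the bucket count of 100 are illustrative stand-ins for the repository's IBFChunkSize and per-level sizes:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// bucketIndex repeats the ChunkIndex recipe: hash the level (as a decimal
// string) followed by the chunk bytes, then take the first 8 bytes of the
// SHA-256 digest as a big-endian integer modulo the buckets in that level.
func bucketIndex(chunk [64]byte, level int, itemsInLevel int) uint64 {
    dataToHash := append([]byte(fmt.Sprintf("%d", level)), chunk[:]...)
    seed := sha256.Sum256(dataToHash)
    return binary.BigEndian.Uint64(seed[0:8]) % uint64(itemsInLevel)
}

func main() {
    var chunk [64]byte
    copy(chunk[:], "example bid chunk")

    // The same chunk maps to an independent bucket at each level,
    // because the level number is part of the hashed data.
    for level := 0; level < 3; level++ {
        fmt.Printf("level %d -> bucket %d\n", level, bucketIndex(chunk, level, 100))
    }
}
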
@@ -151,11 +149,11 @@ func (v *IBFVector) Recover() ([][IBFChunkSize]byte, error) {
 
     // Deep copy chunks and counters
     for level := range v.Chunks {
-        workingCopy.Chunks[level] = make([][IBFChunkSize]byte, len(v.Chunks[level]))
+        workingCopy.Chunks[level] = make([]big.Int, len(v.Chunks[level]))
         workingCopy.Counters[level] = make([]uint64, len(v.Counters[level]))
 
         for i := range v.Chunks[level] {
-            copy(workingCopy.Chunks[level][i][:], v.Chunks[level][i][:])
+            workingCopy.Chunks[level][i].Set(&v.Chunks[level][i])
             workingCopy.Counters[level][i] = v.Counters[level][i]
         }
     }

@@ -166,6 +164,8 @@ func (v *IBFVector) Recover() ([][IBFChunkSize]byte, error) {
     // Keep track of whether we made progress in the current iteration
     madeProgress := true
 
+    chunkEl := new(big.Int)
+
     // Continue peeling until no more progress can be made
     for madeProgress {
         madeProgress = false

@@ -176,11 +176,11 @@ func (v *IBFVector) Recover() ([][IBFChunkSize]byte, error) {
                 // Found a pure cell
                 if workingCopy.Counters[level][i] == 1 {
                     // Get the chunk from this cell
-                    chunk := workingCopy.Chunks[level][i]
+                    chunkEl.Set(&workingCopy.Chunks[level][i])
+                    chunk := ElementToChunk(chunkEl)
 
                     // Record this chunk as recovered
                     recovered = append(recovered, chunk)
-                    chunkAsEl := ChunkToElement(chunk)
 
                     // Remove this chunk from all levels to continue peeling
                     for innerLevel := range workingCopy.Chunks {

@@ -192,10 +192,7 @@ func (v *IBFVector) Recover() ([][IBFChunkSize]byte, error) {
                         }
 
                         // Remove the chunk from this cell
-                        currentEl := ChunkToElement(workingCopy.Chunks[innerLevel][innerIndex])
-                        crypto.FieldSubInplace(currentEl, chunkAsEl, crypto.AuctionFieldOrder)
-                        workingCopy.Chunks[innerLevel][innerIndex] = ElementToChunk(currentEl)
-
+                        crypto.FieldSubInplace(&workingCopy.Chunks[innerLevel][innerIndex], chunkEl, crypto.AuctionFieldOrder)
                         workingCopy.Counters[innerLevel][innerIndex]--
                     }
 

@@ -222,7 +219,7 @@ func (v *IBFVector) Bytes() []byte {
     res = binary.BigEndian.AppendUint32(res, uint32(len(v.Chunks[0])))
     for level := range v.Chunks {
         for chunk := range v.Chunks[level] {
-            res = append(res, v.Chunks[level][chunk][:]...)
+            res = append(res, v.Chunks[level][chunk].Bytes()...)
         }
     }
 
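
The Recover changes keep the peeling algorithm itself intact: find a cell whose counter is 1, treat its sum as a recovered chunk, subtract that chunk from every other cell it hashes to, and repeat until no pure cell remains. A toy, single-table version of that loop shows the idea in isolation; it uses plain big.Int sums, k illustrative hash positions per value, and none of the repository's field reduction or multi-level layout:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
    "math/big"
)

// A toy, single-table sketch of the peeling loop: every value is added into
// k buckets; a bucket with counter 1 is "pure", so its sum is exactly one
// value, which is then subtracted from the other buckets it touches.
// Bucket count, k, and the hash are illustrative, not the repository's.
const (
    nBuckets = 32
    k        = 3
)

type cell struct {
    sum   big.Int
    count int
}

func indexOf(v *big.Int, i int) int {
    h := sha256.Sum256(append([]byte{byte(i)}, v.Bytes()...))
    return int(binary.BigEndian.Uint64(h[:8]) % nBuckets)
}

func insert(table []cell, v *big.Int) {
    for i := 0; i < k; i++ {
        c := &table[indexOf(v, i)]
        c.sum.Add(&c.sum, v)
        c.count++
    }
}

func peel(table []cell) []*big.Int {
    var recovered []*big.Int
    for madeProgress := true; madeProgress; {
        madeProgress = false
        for b := range table {
            if table[b].count != 1 {
                continue // not a pure cell
            }
            v := new(big.Int).Set(&table[b].sum)
            recovered = append(recovered, v)
            for i := 0; i < k; i++ { // remove v from every bucket it was inserted into
                c := &table[indexOf(v, i)]
                c.sum.Sub(&c.sum, v)
                c.count--
            }
            madeProgress = true
        }
    }
    return recovered
}

func main() {
    table := make([]cell, nBuckets)
    for _, n := range []int64{11, 22, 33, 44, 55} {
        insert(table, big.NewInt(n))
    }
    fmt.Println(peel(table)) // typically all five values, in peeling order
}

With few values relative to the number of buckets this typically peels everything; as in the real IBF, recovery stalls if no pure cell remains.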

blind-auction/ibf_test.go

Lines changed: 2 additions & 2 deletions

@@ -14,7 +14,7 @@ func TestIBFVectorRecovery(t *testing.T) {
     ibf := NewIBFVector(100)
 
     // Generate a few random chunks to insert
-    chunks := make([][IBFChunkSize]byte, 5)
+    chunks := make([][IBFChunkSize]byte, 20)
     for i := range chunks {
         rand.Read(chunks[i][:])
     }

@@ -26,7 +26,7 @@ func TestIBFVectorRecovery(t *testing.T) {
 
     // Recover chunks from the IBF
     recovered, err := ibf.Recover()
-    require.NoError(t, err)
+    require.NoError(t, err, ibf)
 
     // Verify all chunks were recovered
     if len(recovered) != len(chunks) {

crypto/fields.go

Lines changed: 24 additions & 22 deletions

@@ -4,50 +4,52 @@ import (
     "math/big"
 )
 
-var prfSeed *big.Int
-
-// MessageFieldOrder defines the finite field order for message operations (513 bits).
+// MessageFieldOrder defines the finite field order for message operations
 var MessageFieldOrder *big.Int
 
-// AuctionFieldOrder defines the finite field order for auction operations (384 bits).
+// AuctionFieldOrder defines the finite field order for auction operations
 var AuctionFieldOrder *big.Int
 
 func init() {
-    // 513 bits so that we can encode 512 bits of data in a chunk
+    // 513 bits so that we can encode 512 bits of data in a chunk for 10k clients, deg(5) and 10 servers
     MessageFieldOrder, _ = big.NewInt(0).SetString("23551861483160902848625974283278945001376208178765538238759867299042020937974421928051251754596306387970642948144090145836318438166833376091610669188604919", 10)
-    AuctionFieldOrder, _ = big.NewInt(0).SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) // 384 bits ring
-    prfSeed, _ = big.NewInt(0).SetString("21384347777672109934322149984740494809390049493978212797410708129763158788480720729944469031881574875043683705480707297038846484696174296250022771335208983", 10)
+
+    AuctionFieldOrder, _ = big.NewInt(0).SetString("63275151763513965838163916473346901052322656945674817744137239911918558788929646550175002249326583566310537778017647", 16) // 385 bits prime
 }
 
 // FieldAddInplace performs modular addition in-place: l = (l + r) mod fieldOrder.
 // The result is stored in l and also returned.
-// TODO: bench & optimize
 func FieldAddInplace(l *big.Int, r *big.Int, fieldOrder *big.Int) *big.Int {
     l.Add(l, r)
-    l.Mod(l, fieldOrder)
+    if l.Cmp(fieldOrder) > 0 {
+        l.Sub(l, fieldOrder)
+    }
+
+    if l.Sign() < 0 {
+        l.Add(l, fieldOrder)
+    }
+
     /*
-        for l.Cmp(fieldOrder) > 1 {
-            l = l.Sub(l, fieldOrder)
+        if l.Cmp(fieldOrder) > 0 {
+            panic("l + r > 2*field")
         }
-        for l.Sign() < 0 {
-            l = l.Add(l, fieldOrder)
+        if l.Sign() < 0 {
+            panic("l - r < 2*field")
         }
     */
+
     return l
 }
 
 // FieldSubInplace performs modular subtraction in-place: l = (l - r) mod fieldOrder.
 // The result is stored in l and also returned.
-// TODO: bench & optimize
 func FieldSubInplace(l *big.Int, r *big.Int, fieldOrder *big.Int) *big.Int {
     l.Sub(l, r)
-    l.Mod(l, fieldOrder)
-    /*
-        for l.Sign() < 0 {
-            l.Add(l, fieldOrder)
-        }
-        for l.Cmp(fieldOrder) > 1 {
-            l.Sub(l, fieldOrder)
-        }*/
+    if l.Cmp(fieldOrder) > 0 {
+        l.Sub(l, fieldOrder)
+    }
+    if l.Sign() < 0 {
+        l.Add(l, fieldOrder)
+    }
     return l
}
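
The rewritten FieldAddInplace and FieldSubInplace replace a full Mod with one conditional subtraction or addition, which is only valid when both operands are already reduced into [0, fieldOrder): the sum is then below 2*fieldOrder and the difference above -fieldOrder. A quick standalone check of that equivalence against big.Int.Mod, using a small illustrative modulus:

package main

import (
    "fmt"
    "math/big"
)

// addReduced mirrors the idea behind the new FieldAddInplace: when l and r
// are both already in [0, fieldOrder), their sum is at most 2*fieldOrder - 2,
// so a single conditional subtraction is enough to reduce it.
// (This sketch uses >= so that l + r == fieldOrder also folds to zero.)
func addReduced(l, r, fieldOrder *big.Int) *big.Int {
    l.Add(l, r)
    if l.Cmp(fieldOrder) >= 0 {
        l.Sub(l, fieldOrder)
    }
    return l
}

func main() {
    q := big.NewInt(97) // illustrative small modulus; the real code uses AuctionFieldOrder
    a, b := big.NewInt(60), big.NewInt(80)

    want := new(big.Int).Mod(new(big.Int).Add(a, b), q) // reference: full reduction
    got := addReduced(new(big.Int).Set(a), b, q)        // conditional subtraction only
    fmt.Println(want, got)                              // 43 43
}

One observation: the committed comparison is the strict l.Cmp(fieldOrder) > 0, so the edge case l + r == fieldOrder is left unreduced, whereas the sketch's >= 0 folds it to zero as Mod would.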
