
Commit d374130

Thomas Stromberg authored and committed
New API, who dis?
1 parent a5c1153 commit d374130

File tree: 26 files changed, +1079 / -830 lines

README.md

Lines changed: 3 additions & 3 deletions
@@ -34,7 +34,7 @@ As a stupid-fast in-memory cache:
 import "github.com/codeGROOVE-dev/sfcache"
 
 // strings as keys, ints as values
-cache := sfcache.Memory[string, int]()
+cache := sfcache.New[string, int]()
 cache.Set("answer", 42)
 val, found := cache.Get("answer")
 ```
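
Review note: taken end to end, the renamed in-memory API reads as below. This is a minimal sketch assembled from the hunk above; the `package main` scaffolding is mine, while the sfcache calls are exactly the ones the README now shows.

```go
package main

import (
	"fmt"

	"github.com/codeGROOVE-dev/sfcache"
)

func main() {
	// sfcache.New replaces the old sfcache.Memory constructor;
	// the type parameters still pin the key and value types.
	cache := sfcache.New[string, int]()
	cache.Set("answer", 42)
	if val, found := cache.Get("answer"); found {
		fmt.Println(val) // 42
	}
}
```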
@@ -48,7 +48,7 @@ import (
 )
 
 p, _ := localfs.New[string, User]("myapp", "")
-cache, _ := sfcache.Persistent[string, User](ctx, p)
+cache, _ := sfcache.NewTiered[string, User](p)
 
 cache.SetAsync(ctx, "user:123", user) // Don't wait for the key to persist
 cache.Store.Len(ctx) // Access persistence layer directly
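
Review note: NewTiered drops both the Persistent name and the ctx argument; context now travels with individual operations such as SetAsync and Store.Len. A sketch of the post-commit shape, assuming a User value type and eliding error handling as the README does:

```go
package main

import (
	"context"

	"github.com/codeGROOVE-dev/sfcache"
	"github.com/codeGROOVE-dev/sfcache/pkg/persist/localfs"
)

// User is a stand-in value type for this sketch.
type User struct{ Name string }

func main() {
	ctx := context.Background()
	user := User{Name: "example"}

	// The localfs persister backs the cache on disk; NewTiered takes
	// only the persistence layer, no context.
	p, _ := localfs.New[string, User]("myapp", "")
	cache, _ := sfcache.NewTiered[string, User](p)

	cache.SetAsync(ctx, "user:123", user) // don't wait for the key to persist
	cache.Store.Len(ctx)                  // access the persistence layer directly
}
```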
@@ -60,7 +60,7 @@ A persistent cache suitable for Cloud Run or local development; uses Cloud Datas
 import "github.com/codeGROOVE-dev/sfcache/pkg/persist/cloudrun"
 
 p, _ := cloudrun.New[string, User](ctx, "myapp")
-cache, _ := sfcache.Persistent[string, User](ctx, p)
+cache, _ := sfcache.NewTiered[string, User](p)
 ```
 
 ## Performance against the Competition
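
Review note: both persistence backends now share one shape. The persister constructor keeps its own arguments (cloudrun.New still takes ctx, localfs.New does not), while sfcache.NewTiered takes only the persister; a context is supplied per operation instead of at construction time.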

benchmarks/benchmark_test.go

Lines changed: 189 additions & 52 deletions
@@ -3,16 +3,18 @@ package benchmarks
 
 import (
     "encoding/binary"
+    "encoding/json"
     "fmt"
-    "math"
-    "math/rand/v2"
+    "os"
+    "os/exec"
     "strconv"
     "sync"
     "sync/atomic"
     "testing"
     "time"
 
     "github.com/codeGROOVE-dev/sfcache"
+    "github.com/codeGROOVE-dev/sfcache/benchmarks/pkg/workload"
     "github.com/coocood/freecache"
     "github.com/dgraph-io/ristretto"
     lru "github.com/hashicorp/golang-lru/v2"
@@ -24,6 +26,13 @@ import (
 // Full Benchmark Suite
 // =============================================================================
 
+func TestMemoryOverhead(t *testing.T) {
+    if testing.Short() {
+        t.Skip("skipping memory benchmark in short mode")
+    }
+    runMemoryBenchmark(t)
+}
+
 // TestBenchmarkSuite runs the 5 key benchmarks for tracking sfcache performance.
 // Run with: go test -run=TestBenchmarkSuite -v
 func TestBenchmarkSuite(t *testing.T) {
@@ -54,6 +63,10 @@ func TestBenchmarkSuite(t *testing.T) {
     // 5. Synthetic hit rate with Zipf distribution
     printTestHeader("TestHitRate", "Zipf Hit Rate")
     runHitRateBenchmark()
+
+    // 6. Memory Overhead
+    printTestHeader("TestMemoryOverhead", "Memory Usage (10k items, 1KB values)")
+    runMemoryBenchmark(t)
 }
 
 func printTestHeader(testName, description string) {
@@ -125,7 +138,7 @@ func runHitRateBenchmark() {
     fmt.Println("| Cache | Size=1% | Size=2.5% | Size=5% |")
     fmt.Println("|---------------|---------|-----------|---------|")
 
-    workload := generateWorkload(hitRateWorkload, hitRateKeySpace, hitRateAlpha, 42)
+    workload := workload.GenerateZipfInt(hitRateWorkload, hitRateKeySpace, hitRateAlpha, 42)
     cacheSizes := []int{10000, 25000, 50000}
 
     caches := []struct {
@@ -198,52 +211,8 @@ func printHitRateSummary(results []hitRateResult) {
     }
 }
 
-func generateWorkload(n, keySpace int, theta float64, seed uint64) []int {
-    rng := rand.New(rand.NewPCG(seed, seed+1))
-    keys := make([]int, n)
-
-    // Use YCSB-style Zipf distribution (matches CockroachDB/go-cache-benchmark exactly)
-    // The external benchmark uses iMin=0, iMax=keySpace, so spread = iMax+1-iMin = keySpace+1
-    spread := keySpace + 1
-
-    // Precompute zeta values using spread (not keySpace)
-    zeta2 := computeZeta(2, theta)
-    zetaN := computeZeta(uint64(spread), theta)
-    alpha := 1.0 / (1.0 - theta)
-    eta := (1 - math.Pow(2.0/float64(spread), 1.0-theta)) / (1.0 - zeta2/zetaN)
-    halfPowTheta := 1.0 + math.Pow(0.5, theta)
-
-    for i := range n {
-        u := rng.Float64()
-        uz := u * zetaN
-        var result int
-        switch {
-        case uz < 1.0:
-            result = 0
-        case uz < halfPowTheta:
-            result = 1
-        default:
-            result = int(float64(spread) * math.Pow(eta*u-eta+1.0, alpha))
-        }
-        if result >= keySpace {
-            result = keySpace - 1
-        }
-        keys[i] = result
-    }
-    return keys
-}
-
-// computeZeta calculates zeta(n, theta) = sum(1/i^theta) for i=1 to n
-func computeZeta(n uint64, theta float64) float64 {
-    sum := 0.0
-    for i := uint64(1); i <= n; i++ {
-        sum += 1.0 / math.Pow(float64(i), theta)
-    }
-    return sum
-}
-
 func hitRateSFCache(workload []int, cacheSize int) float64 {
-    cache := sfcache.Memory[int, int](sfcache.WithSize(cacheSize))
+    cache := sfcache.New[int, int](sfcache.Size(cacheSize))
     var hits int
     for _, key := range workload {
         if _, found := cache.Get(key); found {
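
Review note: the deleted generator evidently moves into the new benchmarks/pkg/workload import as workload.GenerateZipfInt; the updated call sites pass the same (n, keySpace, theta, seed) arguments as before. The package itself is not part of this excerpt, so the following is a plausible sketch assuming a straight move of the deleted code under the new name:

```go
// Package workload generates synthetic key distributions for cache benchmarks.
package workload

import (
	"math"
	"math/rand/v2"
)

// GenerateZipfInt returns n keys in [0, keySpace) drawn from a YCSB-style
// Zipf distribution with skew theta, seeded deterministically.
func GenerateZipfInt(n, keySpace int, theta float64, seed uint64) []int {
	rng := rand.New(rand.NewPCG(seed, seed+1))
	keys := make([]int, n)

	// iMin=0, iMax=keySpace, so spread = iMax+1-iMin = keySpace+1
	// (matches CockroachDB/go-cache-benchmark).
	spread := keySpace + 1

	// Precompute zeta values using spread (not keySpace).
	zeta2 := computeZeta(2, theta)
	zetaN := computeZeta(uint64(spread), theta)
	alpha := 1.0 / (1.0 - theta)
	eta := (1 - math.Pow(2.0/float64(spread), 1.0-theta)) / (1.0 - zeta2/zetaN)
	halfPowTheta := 1.0 + math.Pow(0.5, theta)

	for i := range n {
		u := rng.Float64()
		uz := u * zetaN
		var result int
		switch {
		case uz < 1.0:
			result = 0
		case uz < halfPowTheta:
			result = 1
		default:
			result = int(float64(spread) * math.Pow(eta*u-eta+1.0, alpha))
		}
		if result >= keySpace {
			result = keySpace - 1
		}
		keys[i] = result
	}
	return keys
}

// computeZeta calculates zeta(n, theta) = sum(1/i^theta) for i = 1..n.
func computeZeta(n uint64, theta float64) float64 {
	sum := 0.0
	for i := uint64(1); i <= n; i++ {
		sum += 1.0 / math.Pow(float64(i), theta)
	}
	return sum
}
```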
@@ -443,7 +412,7 @@ func measurePerf(name string, getFn, setFn func(b *testing.B)) perfResult {
 }
 
 func benchSFCacheGet(b *testing.B) {
-    cache := sfcache.Memory[int, int](sfcache.WithSize(perfCacheSize))
+    cache := sfcache.New[int, int](sfcache.Size(perfCacheSize))
     for i := range perfCacheSize {
         cache.Set(i, i)
     }
@@ -454,7 +423,7 @@ func benchSFCacheGet(b *testing.B) {
 }
 
 func benchSFCacheSet(b *testing.B) {
-    cache := sfcache.Memory[int, int](sfcache.WithSize(perfCacheSize))
+    cache := sfcache.New[int, int](sfcache.Size(perfCacheSize))
     b.ResetTimer()
     for i := range b.N {
         cache.Set(i%perfCacheSize, i)
@@ -636,7 +605,7 @@ const (
 
 func runZipfThroughputBenchmark(threads int) {
     // Generate Zipf workload once for all caches
-    workload := generateWorkload(zipfWorkloadSize, perfCacheSize, zipfAlpha, 42)
+    workload := workload.GenerateZipfInt(zipfWorkloadSize, perfCacheSize, zipfAlpha, 42)
 
     caches := []string{"sfcache", "otter", "ristretto", "tinylfu", "freecache", "lru"}
 
@@ -681,7 +650,7 @@ func measureZipfQPS(cacheName string, threads int, workload []int) float64 {
 
     switch cacheName {
     case "sfcache":
-        cache := sfcache.Memory[int, int](sfcache.WithSize(perfCacheSize))
+        cache := sfcache.New[int, int](sfcache.Size(perfCacheSize))
         for i := range perfCacheSize {
             cache.Set(i, i)
         }
@@ -866,3 +835,171 @@ func measureZipfQPS(cacheName string, threads int, workload []int) float64 {
 
     return float64(ops.Load()) / concurrentDuration.Seconds()
 }
+
+// =============================================================================
+// Memory Overhead Implementation (External Process)
+// =============================================================================
+
+type runnerOutput struct {
+    Name  string `json:"name"`
+    Items int    `json:"items"`
+    Bytes uint64 `json:"bytes"`
+}
+
+func runMemoryBenchmark(t *testing.T) {
+    fmt.Println()
+    fmt.Println("### Memory Usage (32k cap, 32k unique items, 1KB values) - Isolated Processes")
+    fmt.Println("    Workload: Repeated Access to force admission and fill capacity")
+    fmt.Println()
+    fmt.Println("| Cache | Items Stored | Memory (MB) | Overhead vs Map (bytes/item) |")
+    fmt.Println("|---------------|--------------|-------------|------------------------------|")
+
+    caches := []string{"mem_sfcache", "mem_otter", "mem_ristretto", "mem_tinylfu", "mem_freecache", "mem_lru"}
+    results := make([]runnerOutput, len(caches))
+
+    for i, name := range caches {
+        results[i] = buildAndRun(t, name)
+    }
+
+    // Sort by memory usage ascending
+    for i := range len(results) - 1 {
+        for j := i + 1; j < len(results); j++ {
+            if results[j].Bytes < results[i].Bytes {
+                results[i], results[j] = results[j], results[i]
+            }
+        }
+    }
+
+    for _, r := range results {
+        // Run baseline with exact same number of items for fair comparison
+        baseline := buildAndRun(t, "mem_baseline", "-target", strconv.Itoa(r.Items))
+
+        // Calculate overhead relative to baseline
+        diff := int64(r.Bytes) - int64(baseline.Bytes)
+
+        var overheadPerItem int64
+        if r.Items > 0 {
+            overheadPerItem = diff / int64(r.Items)
+        }
+
+        mb := float64(r.Bytes) / 1024 / 1024
+
+        fmt.Printf("| %s | %12d | %8.2f MB | %28d |\n",
+            formatCacheName(r.Name), r.Items, mb, overheadPerItem)
+    }
+    fmt.Println()
+}
+
+func buildAndRun(t *testing.T, cmdDir string, args ...string) runnerOutput {
+    // Binary name = cmdDir (e.g., mem_sfcache)
+    binName := "./" + cmdDir + ".bin"
+    srcDir := "./cmd/" + cmdDir
+
+    // Build
+    buildCmd := exec.Command("go", "build", "-o", binName, srcDir)
+    if out, err := buildCmd.CombinedOutput(); err != nil {
+        t.Fatalf("failed to build %s: %v\n%s", srcDir, err, out)
+    }
+    defer os.Remove(binName)
+
+    // Run
+    runArgs := append([]string{"-iter", "250000", "-cap", "32768"}, args...)
+    runCmd := exec.Command(binName, runArgs...)
+    out, err := runCmd.CombinedOutput()
+    if err != nil {
+        t.Fatalf("failed to run %s: %v\nOutput: %s", binName, err, out)
+    }
+
+    var res runnerOutput
+    if err := json.Unmarshal(out, &res); err != nil {
+        t.Fatalf("failed to parse output for %s: %v\nOutput: %s", binName, err, out)
+    }
+    return res
+}
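
Review note: the mem_* runner binaries under ./cmd/ are built and executed above but are not shown in this diff. Given the flags buildAndRun passes (-iter, -cap, and -target for the baseline) and the JSON shape it decodes, a runner plausibly looks like the hypothetical sketch below; the fill loop and the heap measurement are guesses, not the committed code.

```go
// Hypothetical sketch of a runner such as benchmarks/cmd/mem_sfcache/main.go.
// Only the flag names (-iter, -cap) and the JSON fields consumed by
// buildAndRun are taken from the diff; everything else is assumed.
package main

import (
	"encoding/json"
	"flag"
	"os"
	"runtime"

	"github.com/codeGROOVE-dev/sfcache"
)

func main() {
	iter := flag.Int("iter", 250000, "number of accesses to perform")
	capacity := flag.Int("cap", 32768, "cache capacity in items")
	flag.Parse()

	cache := sfcache.New[int, []byte](sfcache.Size(*capacity))
	value := make([]byte, 1024) // 1KB values, per the table header

	// Repeated access over the key space so admission policies
	// eventually fill the cache to capacity.
	for i := 0; i < *iter; i++ {
		key := i % *capacity
		if _, found := cache.Get(key); !found {
			cache.Set(key, value)
		}
	}

	// Count resident keys; the real runner may use a Len() method instead.
	items := 0
	for key := 0; key < *capacity; key++ {
		if _, found := cache.Get(key); found {
			items++
		}
	}

	// Report live heap after a GC cycle as the memory figure.
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	json.NewEncoder(os.Stdout).Encode(map[string]any{
		"name":  "sfcache",
		"items": items,
		"bytes": ms.HeapAlloc,
	})
}
```

The baseline runner (mem_baseline) would presumably do the same with a plain map sized by -target, which is why each cache row is compared against a baseline holding exactly the same number of items.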
