Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 23 additions & 11 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -85,9 +85,13 @@ go test -bench=. -benchmem
For this benchmark, memory was created with the following specs: `1024 bytes per record`, `4096 records per shard`, `256 shards (max)`.

```
BenchmarkCacheNewMedium-12 291 3670372 ns/op 22776481 B/op 12408 allocs/op
BenchmarkCacheSetMedium-12 1928548 620.3 ns/op 63 B/op 1 allocs/op
BenchmarkCacheGetMedium-12 16707145 69.87 ns/op 0 B/op 0 allocs/op
goos: linux
goarch: amd64
pkg: github.com/praserx/atomic-cache/v2
cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz
BenchmarkCacheNewMedium-12 288 4109240 ns/op 22750002 B/op 12405 allocs/op
BenchmarkCacheSetMedium-12 4499152 269.8 ns/op 16 B/op 0 allocs/op
BenchmarkCacheGetMedium-12 19747963 59.72 ns/op 0 B/op 0 allocs/op
```

*If you want to do some special benchmarking, go ahead.*
Expand All @@ -96,18 +100,26 @@ BenchmarkCacheGetMedium-12 16707145 69.87 ns/op 0 B/op

**SET**
```
BenchmarkAtomicCacheSet-12 2921170 413.0 ns/op 55 B/op 2 allocs/op
BenchmarkBigCacheSet-12 3448020 345.5 ns/op 0 B/op 0 allocs/op
BenchmarkFreeCacheSet-12 4777364 217.2 ns/op 65 B/op 1 allocs/op
BenchmarkHashicorpCacheSet-12 6208528 202.2 ns/op 65 B/op 3 allocs/op
goos: linux
goarch: amd64
pkg: github.com/praserx/atomic-cache/v2
cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz
BenchmarkAtomicCacheSet-12 5755452 195.3 ns/op 27 B/op 1 allocs/op
BenchmarkBigCacheSet-12 4290684 286.9 ns/op 0 B/op 0 allocs/op
BenchmarkFreeCacheSet-12 5806412 199.3 ns/op 65 B/op 1 allocs/op
BenchmarkHashicorpCacheSet-12 6333306 170.0 ns/op 65 B/op 3 allocs/op
```

**GET**
```
BenchmarkAtomicCacheGet-12 9697010 121.7 ns/op 0 B/op 0 allocs/op
BenchmarkBigCacheGet-12 4031352 295.3 ns/op 88 B/op 2 allocs/op
BenchmarkFreeCacheGet-12 4813386 276.8 ns/op 88 B/op 2 allocs/op
BenchmarkHashicorpCacheGet-12 11071472 107.4 ns/op 16 B/op 1 allocs/op
goos: linux
goarch: amd64
pkg: github.com/praserx/atomic-cache/v2
cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz
BenchmarkAtomicCacheGet-12 13004460 97.27 ns/op 0 B/op 0 allocs/op
BenchmarkBigCacheGet-12 4403041 272.5 ns/op 88 B/op 2 allocs/op
BenchmarkFreeCacheGet-12 5586747 231.9 ns/op 88 B/op 2 allocs/op
BenchmarkHashicorpCacheGet-12 11445339 99.70 ns/op 16 B/op 1 allocs/op
```

## License
Expand Down
82 changes: 71 additions & 11 deletions cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ const (
LGSH
)

// KeepTTL is a sentinel expiration value: when passed to Set for an existing
// key, the record's data is updated while its current expiration time is kept.
const KeepTTL = time.Duration(-1)

// AtomicCache structure represents whole cache memory.
type AtomicCache struct {
// RWMutex is used for access to shards array.
Expand Down Expand Up @@ -154,50 +158,106 @@ func initShardsSection(shardsSection *ShardsLookup, maxShards, maxRecords, recor
// space for data. If there is no empty space, new shard is allocated. Otherwise
// some valid record (FIFO queue) is deleted and new one is stored.
func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error {
// Reject if data is too large for any shard
if len(data) > int(a.RecordSizeLarge) {
return ErrDataLimit
}

// Track if this is a new record and if garbage collection should be triggered
new := false
collectGarbage := false

// Select the appropriate shard section based on data size
shardSection, shardSectionID := a.getShardsSectionBySize(len(data))

a.Lock()
if val, ok := a.lookup[key]; !ok {
var (
exists bool
val LookupRecord
)

// Only lock for shared state mutation: check if key exists in lookup
a.RLock()
val, exists = a.lookup[key]
a.RUnlock()

// Determine expiration time: if KeepTTL and record exists, preserve old
// expiration; otherwise, calculate new.
var expireTime time.Time
if expire == KeepTTL && exists {
expireTime = val.Expiration
} else {
expireTime = a.getExprTime(expire)
}

if !exists {
// Key is new, will allocate new record
new = true
} else {
if val.ShardSection != shardSectionID {
// Key exists but data size changed: move to new section, free old record.
// Explanation: If the record size changed and data should be stored in a different
// shard section, we need to free the old record and allocate a new record in
// the correct shard section.
a.Lock()
shardSection.shards[val.ShardIndex].Free(val.RecordIndex)
val.RecordIndex = shardSection.shards[val.ShardIndex].Set(data)
a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: a.getExprTime(expire)}
a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: expireTime}
a.Unlock()
} else {
prevShardSection := a.getShardsSectionByID(val.ShardSection)
prevShardSection.shards[val.ShardIndex].Free(val.RecordIndex)
new = true
// Key exists in same section: update existing record.
// Explanation: If the record size is the same, we can simply update the existing record
// in the same shard section without needing to free it first.
// This is more efficient as it avoids unnecessary memory allocation and deallocation.
// This is a performance optimization to avoid unnecessary memory allocation and deallocation.
// It assumes that the record size has not changed and we can safely update it.
a.Lock()
shardSection.shards[val.ShardIndex].Seti(val.RecordIndex, data)
a.Unlock()
}
}

if new {
// Allocate new record: try to find a shard with space, or allocate a new shard, or buffer if full
a.Lock()
if si, ok := a.getShard(shardSectionID); ok {
// Found shard with available slot.
// Explanation: If we found a shard with available space, we can simply set the data
// in that shard and update the lookup table with the new record index.
// This avoids unnecessary memory allocation and deallocation, improving performance.
ri := shardSection.shards[si].Set(data)
a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)}
a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime}
a.Unlock()
} else if si, ok := a.getEmptyShard(shardSectionID); ok {
// No shard with space, allocate new shard.
// Explanation: If there is no shard with available space, we allocate a new shard
// and set the data in that new shard. This is necessary when all existing shards
// are full and we need to create a new shard to accommodate the new record.
// This ensures that we can always store new records, even if it means creating a
// new shard when all existing shards are full.
shardSection.shards[si] = NewShard(a.MaxRecords, a.getRecordSizeByShardSectionID(shardSectionID))
ri := shardSection.shards[si].Set(data)
a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)}
a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime}
a.Unlock()
} else {
if len(a.buffer) <= int(a.MaxRecords) {
// All shards full, buffer the request or return error if buffer is full.
if len(a.buffer) < int(a.MaxRecords) {
// Buffer the request if there is space in buffer.
// Explanation: If the buffer has space, we can store the request in the buffer
// instead of allocating a new shard. This allows us to handle more requests without
// immediately allocating new memory, which can be more efficient.
// This is useful when the cache is under heavy load and we want to avoid
// allocating new shards for every request.
a.buffer = append(a.buffer, BufferItem{Key: key, Data: data, Expire: expire})
a.Unlock()
} else {
a.Unlock()
return ErrFullMemory
}

collectGarbage = true
}
}
a.Unlock()

// Trigger garbage collection if needed
if (atomic.AddUint32(&a.GcCounter, 1) == a.GcStarter) || collectGarbage {
atomic.StoreUint32(&a.GcCounter, 0)
go a.collectGarbage()
Expand Down
38 changes: 38 additions & 0 deletions cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,44 @@ func TestCacheFreeAfterExpiration(t *testing.T) {
}
}

// TestCacheKeepTTL verifies that updating an existing record with the KeepTTL
// sentinel preserves the record's original expiration time while still
// replacing its stored value.
func TestCacheKeepTTL(t *testing.T) {
	var (
		cache   = New()
		key     = "keep-ttl-key"
		initial = []byte("first")
		updated = []byte("second")
	)

	// Store the initial value with a concrete expiration.
	if err := cache.Set(key, initial, 2*time.Second); err != nil {
		t.Fatalf("Set error: %s", err)
	}
	rec, found := cache.lookup[key]
	if !found {
		t.Fatalf("Key not found after Set")
	}
	wantExp := rec.Expiration

	// Overwrite the value, asking the cache to keep the previous TTL.
	if err := cache.Set(key, updated, KeepTTL); err != nil {
		t.Fatalf("Set error (KeepTTL): %s", err)
	}
	rec, found = cache.lookup[key]
	if !found {
		t.Fatalf("Key not found after Set with KeepTTL")
	}
	if !rec.Expiration.Equal(wantExp) {
		t.Errorf("Expiration changed: got %v, want %v", rec.Expiration, wantExp)
	}

	// The stored payload must reflect the second write.
	got, err := cache.Get(key)
	if err != nil {
		t.Fatalf("Get error: %s", err)
	}
	if !reflect.DeepEqual(got, updated) {
		t.Errorf("Value not updated: got %v, want %v", got, updated)
	}
}

func benchmarkCacheNew(recordCount int, b *testing.B) {
b.ReportAllocs()

Expand Down
9 changes: 9 additions & 0 deletions shard.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,15 @@ func (s *Shard) Set(data []byte) (i int) {
return
}

// Seti overwrites the record stored at the given slot index with data. For
// performance reasons no bounds checking is done; it is the caller's
// responsibility to pass an index that is valid for this shard.
func (s *Shard) Seti(index int, data []byte) {
	s.Lock() // guard concurrent access to the slots array
	defer s.Unlock()
	s.slots[index].Set(data)
}

// Get returns bytes from shard memory based on index. If the returned byte
// slice is empty, the record does not exist.
func (s *Shard) Get(index int) (v []byte) {
Expand Down
47 changes: 47 additions & 0 deletions shard_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,53 @@ import (
"testing"
)

// TestShardSeti checks that Seti overwrites an existing slot in place and
// that an out-of-bounds index triggers a panic.
func TestShardSeti(t *testing.T) {
	s := NewShard(10, 4)

	// Obtain a valid slot via Set, then overwrite its contents through Seti.
	slot := s.Set([]byte{0, 0, 0, 0})
	s.Seti(slot, []byte{1, 2, 3, 4})
	want := []byte{1, 2, 3, 4}
	if got := s.Get(slot); !reflect.DeepEqual(got, want) {
		t.Errorf("SetI: got %v, want %v", got, want)
	}

	// An index beyond the shard's slot count must panic.
	defer func() {
		if recover() == nil {
			t.Errorf("SetI should panic for out-of-bounds index")
		}
	}()
	s.Seti(20, []byte{9, 9, 9, 9})
}

// TestShardFreeAndIsEmpty verifies that a shard reports itself non-empty
// while holding a record and empty again once that record is freed.
func TestShardFreeAndIsEmpty(t *testing.T) {
	s := NewShard(5, 4)

	slot := s.Set([]byte{1, 2, 3, 4})
	if s.IsEmpty() {
		t.Errorf("Shard should not be empty after Set")
	}

	s.Free(slot)
	if !s.IsEmpty() {
		t.Errorf("Shard should be empty after Free")
	}
}

// TestShardGetSlotsAvail verifies that the available-slot counter decreases
// on Set and is restored after Free.
func TestShardGetSlotsAvail(t *testing.T) {
	s := NewShard(3, 2)

	if got := s.GetSlotsAvail(); got != 3 {
		t.Errorf("Expected 3 slots available, got %d", got)
	}

	slot := s.Set([]byte{1, 2})
	if got := s.GetSlotsAvail(); got != 2 {
		t.Errorf("Expected 2 slots available after Set, got %d", got)
	}

	s.Free(slot)
	if got := s.GetSlotsAvail(); got != 3 {
		t.Errorf("Expected 3 slots available after Free, got %d", got)
	}
}

func TestShardSimple(t *testing.T) {
for _, c := range []struct {
recordCount int
Expand Down
Loading