From 847b1dccd7e8e7cabdae309d090021c8c3e9e1a7 Mon Sep 17 00:00:00 2001 From: PraserX Date: Tue, 22 Jul 2025 10:31:54 +0200 Subject: [PATCH 1/9] feat: add Seti method for updating shard records - Implemented Seti method to update data in shard memory based on index. - Added tests for Seti method, including validation for out-of-bounds index. - Enhanced existing tests for shard functionality. --- shard.go | 9 +++++++++ shard_test.go | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/shard.go b/shard.go index 4a28f2b..b741fc8 100644 --- a/shard.go +++ b/shard.go @@ -41,6 +41,15 @@ func (s *Shard) Set(data []byte) (i int) { return } +// Seti updates data in shard memory based on index. To preserve performance, +// it does not check if index is valid. It is the responsibility of the caller +// to ensure that index is valid and within bounds of shard. +func (s *Shard) Seti(i int, data []byte) { + s.Lock() // Lock for writing and reading + s.slots[i].Set(data) + s.Unlock() // Unlock for writing and reading +} + // Get returns bytes from shard memory based on index. If array on output is // empty, then record is not exists. 
func (s *Shard) Get(index int) (v []byte) { diff --git a/shard_test.go b/shard_test.go index a5244cb..f0c3389 100644 --- a/shard_test.go +++ b/shard_test.go @@ -5,6 +5,53 @@ import ( "testing" ) +func TestShardSeti(t *testing.T) { + shard := NewShard(10, 4) + // Use Set to get a valid index, then SetI to update + idx := shard.Set([]byte{0, 0, 0, 0}) + shard.Seti(idx, []byte{1, 2, 3, 4}) + got := shard.Get(idx) + want := []byte{1, 2, 3, 4} + if !reflect.DeepEqual(got, want) { + t.Errorf("SetI: got %v, want %v", got, want) + } + + // SetI with out-of-bounds index (should panic, so recover) + defer func() { + if r := recover(); r == nil { + t.Errorf("SetI should panic for out-of-bounds index") + } + }() + shard.Seti(20, []byte{9, 9, 9, 9}) +} + +func TestShardFreeAndIsEmpty(t *testing.T) { + shard := NewShard(5, 4) + idx := shard.Set([]byte{1, 2, 3, 4}) + if shard.IsEmpty() { + t.Errorf("Shard should not be empty after Set") + } + shard.Free(idx) + if !shard.IsEmpty() { + t.Errorf("Shard should be empty after Free") + } +} + +func TestShardGetSlotsAvail(t *testing.T) { + shard := NewShard(3, 2) + if avail := shard.GetSlotsAvail(); avail != 3 { + t.Errorf("Expected 3 slots available, got %d", avail) + } + idx := shard.Set([]byte{1, 2}) + if avail := shard.GetSlotsAvail(); avail != 2 { + t.Errorf("Expected 2 slots available after Set, got %d", avail) + } + shard.Free(idx) + if avail := shard.GetSlotsAvail(); avail != 3 { + t.Errorf("Expected 3 slots available after Free, got %d", avail) + } +} + func TestShardSimple(t *testing.T) { for _, c := range []struct { recordCount int From d9e85faf815fa57937dfcfceebea8a283e1566e0 Mon Sep 17 00:00:00 2001 From: PraserX Date: Tue, 22 Jul 2025 10:36:55 +0200 Subject: [PATCH 2/9] feat: add KeepTTL constant for record expiration management Introduce KeepTTL constant to allow records to retain their current expiration time when updated. This enhances the flexibility of the Set method in managing cache records. 
--- cache.go | 82 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 71 insertions(+), 11 deletions(-) diff --git a/cache.go b/cache.go index bddfc47..77437f6 100644 --- a/cache.go +++ b/cache.go @@ -24,6 +24,10 @@ const ( LGSH ) +// KeepTTL is used for setting expiration time to current expiration time. +// It means that record will be updated with the same expiration time. +const KeepTTL = time.Duration(-1) + // AtomicCache structure represents whole cache memory. type AtomicCache struct { // RWMutex is used for access to shards array. @@ -154,50 +158,106 @@ func initShardsSection(shardsSection *ShardsLookup, maxShards, maxRecords, recor // space for data. If there is no empty space, new shard is allocated. Otherwise // some valid record (FIFO queue) is deleted and new one is stored. func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { + // Reject if data is too large for any shard if len(data) > int(a.RecordSizeLarge) { return ErrDataLimit } + // Track if this is a new record and if garbage collection should be triggered new := false collectGarbage := false + + // Select the appropriate shard section based on data size shardSection, shardSectionID := a.getShardsSectionBySize(len(data)) - a.Lock() - if val, ok := a.lookup[key]; !ok { + var ( + exists bool + val LookupRecord + ) + + // Only lock for shared state mutation: check if key exists in lookup + a.RLock() + val, exists = a.lookup[key] + a.RUnlock() + + // Determine expiration time: if KeepTTL and record exists, preserve old + // expiration; otherwise, calculate new. + var expireTime time.Time + if expire == KeepTTL && exists { + expireTime = val.Expiration + } else { + expireTime = a.getExprTime(expire) + } + + if !exists { + // Key is new, will allocate new record new = true } else { if val.ShardSection != shardSectionID { + // Key exists but data size changed: move to new section, free old record. 
+ // Explaination: If the record size changed and data should be stored in a different + // shard section, we need to free the old record and allocate a new record in + // the correct shard section. + a.Lock() shardSection.shards[val.ShardIndex].Free(val.RecordIndex) val.RecordIndex = shardSection.shards[val.ShardIndex].Set(data) - a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: a.getExprTime(expire)} + a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: expireTime} + a.Unlock() } else { - prevShardSection := a.getShardsSectionByID(val.ShardSection) - prevShardSection.shards[val.ShardIndex].Free(val.RecordIndex) - new = true + // Key exists in same section: update existing record. + // Explaination: If the record size is the same, we can simply update the existing record + // in the same shard section without needing to free it first. + // This is more efficient as it avoids unnecessary memory allocation and deallocation. + // This is a performance optimization to avoid unnecessary memory allocation and deallocation. + // It assumes that the record size has not changed and we can safely update it. + a.Lock() + shardSection.shards[val.ShardIndex].Seti(val.RecordIndex, data) + a.Unlock() } } if new { + // Allocate new record: try to find a shard with space, or allocate a new shard, or buffer if full + a.Lock() if si, ok := a.getShard(shardSectionID); ok { + // Found shard with available slot. + // Explaination: If we found a shard with available space, we can simply set the data + // in that shard and update the lookup table with the new record index. + // This avoids unnecessary memory allocation and deallocation, improving performance. 
ri := shardSection.shards[si].Set(data) - a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)} + a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime} + a.Unlock() } else if si, ok := a.getEmptyShard(shardSectionID); ok { + // No shard with space, allocate new shard. + // Explaination: If there is no shard with available space, we allocate a new shard + // and set the data in that new shard. This is necessary when all existing shards + // are full and we need to create a new shard to accommodate the new record. + // This ensures that we can always store new records, even if it means creating a + // new shard when all existing shards are full. shardSection.shards[si] = NewShard(a.MaxRecords, a.getRecordSizeByShardSectionID(shardSectionID)) ri := shardSection.shards[si].Set(data) - a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)} + a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime} + a.Unlock() } else { - if len(a.buffer) <= int(a.MaxRecords) { + // All shards full, buffer the request or return error if buffer is full. + if len(a.buffer) < int(a.MaxRecords) { + // Buffer the request if there is space in buffer. + // Explaination: If the buffer has space, we can store the request in the buffer + // instead of allocating a new shard. This allows us to handle more requests without + // immediately allocating new memory, which can be more efficient. + // This is useful when the cache is under heavy load and we want to avoid + // allocating new shards for every request. 
a.buffer = append(a.buffer, BufferItem{Key: key, Data: data, Expire: expire}) + a.Unlock() } else { a.Unlock() return ErrFullMemory } - collectGarbage = true } } - a.Unlock() + // Trigger garbage collection if needed if (atomic.AddUint32(&a.GcCounter, 1) == a.GcStarter) || collectGarbage { atomic.StoreUint32(&a.GcCounter, 0) go a.collectGarbage() From 61770a5c5e124f3d0df8733247b5f32b1e651b31 Mon Sep 17 00:00:00 2001 From: PraserX Date: Tue, 22 Jul 2025 10:41:19 +0200 Subject: [PATCH 3/9] feat: add TestCacheKeepTTL for expiration management Implement a test to verify that updating a cache value with KeepTTL maintains the original expiration time while allowing the value to be updated. --- cache_test.go | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/cache_test.go b/cache_test.go index f39efb0..c33b967 100644 --- a/cache_test.go +++ b/cache_test.go @@ -140,6 +140,44 @@ func TestCacheFreeAfterExpiration(t *testing.T) { } } +func TestCacheKeepTTL(t *testing.T) { + cache := New() + key := "keep-ttl-key" + data1 := []byte("first") + data2 := []byte("second") + expire := 2 * time.Second + + // Set initial value with expiration + if err := cache.Set(key, data1, expire); err != nil { + t.Fatalf("Set error: %s", err) + } + val, ok := cache.lookup[key] + if !ok { + t.Fatalf("Key not found after Set") + } + origExp := val.Expiration + + // Update value with KeepTTL + if err := cache.Set(key, data2, KeepTTL); err != nil { + t.Fatalf("Set error (KeepTTL): %s", err) + } + val2, ok := cache.lookup[key] + if !ok { + t.Fatalf("Key not found after Set with KeepTTL") + } + if !val2.Expiration.Equal(origExp) { + t.Errorf("Expiration changed: got %v, want %v", val2.Expiration, origExp) + } + // Value should be updated + got, err := cache.Get(key) + if err != nil { + t.Fatalf("Get error: %s", err) + } + if !reflect.DeepEqual(got, data2) { + t.Errorf("Value not updated: got %v, want %v", got, data2) + } +} + func benchmarkCacheNew(recordCount int, 
b *testing.B) { b.ReportAllocs() From 9f6bfba4c15683d4e4ccfb15afe2ad04d3567e91 Mon Sep 17 00:00:00 2001 From: PraserX Date: Tue, 22 Jul 2025 10:41:35 +0200 Subject: [PATCH 4/9] chore: update benchmark output in README.md for clarity - Added system information to benchmark results for better context. - Updated benchmark results to reflect recent performance measurements. --- README.md | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 640a120..ed9c639 100644 --- a/README.md +++ b/README.md @@ -85,9 +85,13 @@ go test -bench=. -benchmem For this benchmark, memory was created with the following specs: `1024 bytes per record`, `4096 records per shard`, `256 shards (max)`. ``` -BenchmarkCacheNewMedium-12 291 3670372 ns/op 22776481 B/op 12408 allocs/op -BenchmarkCacheSetMedium-12 1928548 620.3 ns/op 63 B/op 1 allocs/op -BenchmarkCacheGetMedium-12 16707145 69.87 ns/op 0 B/op 0 allocs/op +goos: linux +goarch: amd64 +pkg: github.com/praserx/atomic-cache/v2 +cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz +BenchmarkCacheNewMedium-12 288 4109240 ns/op 22750002 B/op 12405 allocs/op +BenchmarkCacheSetMedium-12 4499152 269.8 ns/op 16 B/op 0 allocs/op +BenchmarkCacheGetMedium-12 19747963 59.72 ns/op 0 B/op 0 allocs/op ``` *If you want do some special bencharking, go ahead.* @@ -96,18 +100,26 @@ BenchmarkCacheGetMedium-12 16707145 69.87 ns/op 0 B/op **SET** ``` -BenchmarkAtomicCacheSet-12 2921170 413.0 ns/op 55 B/op 2 allocs/op -BenchmarkBigCacheSet-12 3448020 345.5 ns/op 0 B/op 0 allocs/op -BenchmarkFreeCacheSet-12 4777364 217.2 ns/op 65 B/op 1 allocs/op -BenchmarkHashicorpCacheSet-12 6208528 202.2 ns/op 65 B/op 3 allocs/op +goos: linux +goarch: amd64 +pkg: github.com/praserx/atomic-cache/v2 +cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz +BenchmarkAtomicCacheSet-12 5755452 195.3 ns/op 27 B/op 1 allocs/op +BenchmarkBigCacheSet-12 4290684 286.9 ns/op 0 B/op 0 allocs/op +BenchmarkFreeCacheSet-12 5806412 
199.3 ns/op 65 B/op 1 allocs/op +BenchmarkHashicorpCacheSet-12 6333306 170.0 ns/op 65 B/op 3 allocs/op ``` **GET** ``` -BenchmarkAtomicCacheGet-12 9697010 121.7 ns/op 0 B/op 0 allocs/op -BenchmarkBigCacheGet-12 4031352 295.3 ns/op 88 B/op 2 allocs/op -BenchmarkFreeCacheGet-12 4813386 276.8 ns/op 88 B/op 2 allocs/op -BenchmarkHashicorpCacheGet-12 11071472 107.4 ns/op 16 B/op 1 allocs/op +goos: linux +goarch: amd64 +pkg: github.com/praserx/atomic-cache/v2 +cpu: Intel(R) Core(TM) i7-10850H CPU @ 2.70GHz +BenchmarkAtomicCacheGet-12 13004460 97.27 ns/op 0 B/op 0 allocs/op +BenchmarkBigCacheGet-12 4403041 272.5 ns/op 88 B/op 2 allocs/op +BenchmarkFreeCacheGet-12 5586747 231.9 ns/op 88 B/op 2 allocs/op +BenchmarkHashicorpCacheGet-12 11445339 99.70 ns/op 16 B/op 1 allocs/op ``` ## License From d26bc8ccb2090270a76a5737997ecfb05b3edcc4 Mon Sep 17 00:00:00 2001 From: Praser <24474722+praserx@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:48:33 +0200 Subject: [PATCH 5/9] chore: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cache.go b/cache.go index 77437f6..a8cf2ff 100644 --- a/cache.go +++ b/cache.go @@ -242,7 +242,7 @@ func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { // All shards full, buffer the request or return error if buffer is full. if len(a.buffer) < int(a.MaxRecords) { // Buffer the request if there is space in buffer. - // Explaination: If the buffer has space, we can store the request in the buffer + // Explanation: If the buffer has space, we can store the request in the buffer // instead of allocating a new shard. This allows us to handle more requests without // immediately allocating new memory, which can be more efficient. 
// This is useful when the cache is under heavy load and we want to avoid From 9bad9c101af78a0415b2e456e41cd9fb3fed7757 Mon Sep 17 00:00:00 2001 From: Praser <24474722+praserx@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:50:17 +0200 Subject: [PATCH 6/9] chore: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cache.go b/cache.go index a8cf2ff..0e35d0c 100644 --- a/cache.go +++ b/cache.go @@ -195,7 +195,7 @@ func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { } else { if val.ShardSection != shardSectionID { // Key exists but data size changed: move to new section, free old record. - // Explaination: If the record size changed and data should be stored in a different + // Explanation: If the record size changed and data should be stored in a different // shard section, we need to free the old record and allocate a new record in // the correct shard section. a.Lock() From 3d849f07bd61f3689ec93aef3d44e1548cbee4d4 Mon Sep 17 00:00:00 2001 From: Praser <24474722+praserx@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:50:28 +0200 Subject: [PATCH 7/9] chore: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cache.go b/cache.go index 0e35d0c..46ea32e 100644 --- a/cache.go +++ b/cache.go @@ -205,7 +205,7 @@ func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { a.Unlock() } else { // Key exists in same section: update existing record. - // Explaination: If the record size is the same, we can simply update the existing record + // Explanation: If the record size is the same, we can simply update the existing record // in the same shard section without needing to free it first. // This is more efficient as it avoids unnecessary memory allocation and deallocation. 
// This is a performance optimization to avoid unnecessary memory allocation and deallocation. From 55d33a0317db4422b201a1ec7172635b9ede0077 Mon Sep 17 00:00:00 2001 From: Praser <24474722+praserx@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:50:40 +0200 Subject: [PATCH 8/9] chore: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cache.go b/cache.go index 46ea32e..d630ceb 100644 --- a/cache.go +++ b/cache.go @@ -221,7 +221,7 @@ func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { a.Lock() if si, ok := a.getShard(shardSectionID); ok { // Found shard with available slot. - // Explaination: If we found a shard with available space, we can simply set the data + // Explanation: If we found a shard with available space, we can simply set the data // in that shard and update the lookup table with the new record index. // This avoids unnecessary memory allocation and deallocation, improving performance. ri := shardSection.shards[si].Set(data) From ce5cdcd4774d6c11b35a72272356167c7244b578 Mon Sep 17 00:00:00 2001 From: Praser <24474722+praserx@users.noreply.github.com> Date: Tue, 22 Jul 2025 10:50:54 +0200 Subject: [PATCH 9/9] chore: fix typo Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cache.go b/cache.go index d630ceb..7510c47 100644 --- a/cache.go +++ b/cache.go @@ -229,7 +229,7 @@ func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error { a.Unlock() } else if si, ok := a.getEmptyShard(shardSectionID); ok { // No shard with space, allocate new shard. - // Explaination: If there is no shard with available space, we allocate a new shard + // Explanation: If there is no shard with available space, we allocate a new shard // and set the data in that new shard. 
This is necessary when all existing shards // are full and we need to create a new shard to accommodate the new record. // This ensures that we can always store new records, even if it means creating a