89 changes: 66 additions & 23 deletions cache.go
@@ -14,7 +14,7 @@ var (
ErrFullMemory = errors.New("cannot create new record: memory is full")
)

// Constans below are used for shard section identification.
// Constants below are used for shard section identification.
const (
// SMSH - Small Shards section
SMSH = iota + 1
@@ -38,7 +38,7 @@ type AtomicCache struct {
// Lookup structure used for global index.
lookup map[string]LookupRecord

// Shards lookup tables which contains information about shards sections.
// Shards lookup tables which contain information about shard sections.
smallShards, mediumShards, largeShards ShardsLookup

// Size of byte array used for memory allocation at small shard section.
@@ -63,8 +63,8 @@ type AtomicCache struct {
// Garbage collector counter for starter.
GcCounter uint32

// Buffer contains all unattended cache set requests. It has a maximum site
// which is equal to MaxRecords value.
// Buffer contains all unattended cache set requests. It has a maximum size
// which is equal to the MaxRecords value.
buffer []BufferItem
}

@@ -79,25 +79,25 @@ type ShardsLookup struct {
shardsAvail []int
}

// LookupRecord represents item in lookup table. One record contains index of
// shard and record. So we can determine which shard access and which record of
// shard to get. Record also contains expiration time.
// LookupRecord represents an item in the lookup table. One record contains the index of
// the shard and record. So we can determine which shard to access and which record of
// the shard to get. Record also contains expiration time.
type LookupRecord struct {
RecordIndex int
ShardIndex int
ShardSection int
Expiration time.Time
}

// BufferItem is used for buffer, which contains all unattended cache set
// request.
// BufferItem is used for the buffer, which contains all unattended cache set
// requests.
type BufferItem struct {
Key string
Data []byte
Expire time.Duration
}

// New initialize whole cache memory with one allocated shard.
// New initializes the whole cache memory with one allocated shard.
func New(opts ...Option) *AtomicCache {
var options = &Options{
RecordSizeSmall: 512,
@@ -138,8 +138,8 @@ func New(opts ...Option) *AtomicCache {
return cache
}

// initShardsSection provides shards sections initialization. So the cache has
// one shard in each section at the begging.
// initShardsSection provides shard section initialization. So the cache has
// one shard in each section at the beginning.
func initShardsSection(shardsSection *ShardsLookup, maxShards, maxRecords, recordSize int) {
var shardIndex int

@@ -153,10 +153,12 @@ func initShardsSection(shardsSection *ShardsLookup, maxShards, maxRecords, recor
shardsSection.shards[shardIndex] = NewShard(maxRecords, recordSize)
}

// Set store data to cache memory. If key/record is already in memory, then data
// are replaced. If not, it checks if there are some allocated shard with empty
// space for data. If there is no empty space, new shard is allocated. Otherwise
// some valid record (FIFO queue) is deleted and new one is stored.
// Set stores data to cache memory. If the key/record is already in memory, then data
// are replaced. If not, it checks if there is an allocated shard with empty
// space for data. If there is no empty space, a new shard is allocated.
// Remarks:
// - If expiration time is set to 0 then maximum expiration time is used (48 hours).
// - If expiration time is KeepTTL, then current expiration time is preserved.
func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error {
// Reject if data is too large for any shard
if len(data) > int(a.RecordSizeLarge) {
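
The two expiration remarks above are easiest to see in a short in-package sketch, written in the same style as the tests further down. This is only an illustration, not part of this change: it assumes only the standard time import, and that KeepTTL is the package's time.Duration sentinel exercised by TestCacheKeepTTL.

// exampleSetExpiration is a hedged illustration of the Set expiration remarks.
func exampleSetExpiration() {
	cache := New()

	// Explicit TTL: the record expires one second after it is stored.
	_ = cache.Set("a", []byte("payload"), time.Second)

	// Zero TTL: per the remark above, the maximum expiration time (48 hours) is applied.
	_ = cache.Set("b", []byte("payload"), 0)

	// KeepTTL: per the remark above, re-setting an existing key preserves its current expiration.
	_ = cache.Set("a", []byte("updated payload"), KeepTTL)
}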
@@ -283,6 +285,48 @@ func (a *AtomicCache) Get(key string) ([]byte, error) {
return nil, ErrNotFound
}

// Exists reports whether a record is present in cache memory. It returns true if the
// record is present and has not expired, otherwise false.
func (a *AtomicCache) Exists(key string) bool {
a.RLock()
val, ok := a.lookup[key]
a.RUnlock()
if !ok {
return false
}
// Check expiration
if time.Now().After(val.Expiration) {
return false
}
return true
}

// Delete removes a record from cache memory. If the record is not found, an error
// is returned. It also frees the memory used by the record in its shard.
// If the shard ends up empty, the shard is released.
func (a *AtomicCache) Delete(key string) error {
a.Lock()
defer a.Unlock()

val, ok := a.lookup[key]
if !ok {
return ErrNotFound
}

shardSection := a.getShardsSectionByID(val.ShardSection)
// Check if the shard at val.ShardIndex is nil. This is a defensive check to
// handle cases where the shard might have been released or not initialized
// due to concurrent modifications or unexpected states.
if shardSection.shards[val.ShardIndex] != nil {
shardSection.shards[val.ShardIndex].Free(val.RecordIndex)
a.releaseShard(val.ShardSection, val.ShardIndex)
delete(a.lookup, key)
return nil
}

return ErrNotFound
}
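
For context, a short in-package sketch of how the new Exists/Delete pair added in this change might be used together; it mirrors the tests added below and assumes only the standard fmt and time imports.

// exampleExistsDelete is a hedged illustration, not part of this change.
func exampleExistsDelete() error {
	cache := New()

	if err := cache.Set("session:42", []byte("token"), time.Minute); err != nil {
		return err
	}

	// Exists reports presence (and non-expiration) without copying the record out of its shard.
	if cache.Exists("session:42") {
		// Delete frees the record's slot and, if the shard ends up empty, releases the shard.
		if err := cache.Delete("session:42"); err != nil {
			return err
		}
	}

	// Deleting the same key again (or a never-set key) yields ErrNotFound.
	if err := cache.Delete("session:42"); err != ErrNotFound {
		return fmt.Errorf("expected ErrNotFound, got %v", err)
	}
	return nil
}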

// releaseShard release shard if there is no record in memory. It returns true
// if shard was released. The function requires the shard section ID and
// shard ID on input.
@@ -353,9 +397,9 @@ func (a *AtomicCache) getEmptyShard(shardSectionID int) (int, bool) {
return shardIndex, true
}

// getShardsSectionBySize returns shards section lookup structure and section
// identifier as a second value. The function requires the data size value on
// input. If data are bigger than allowed value, then nil and 0 is returned.
// getShardsSectionBySize returns the shard section lookup structure and section
// identifier as a second value. The function requires the data size value as input.
// If data are bigger than the allowed value, then nil and 0 are returned.
// This method is not thread safe and additional locks are required.
func (a *AtomicCache) getShardsSectionBySize(dataSize int) (*ShardsLookup, int) {
if dataSize <= int(a.RecordSizeSmall) {
@@ -412,10 +456,9 @@ func (a *AtomicCache) getExprTime(expire time.Duration) time.Time {
return time.Now().Add(expire)
}

// collectGarbage provides garbage collect. It goes throught lookup table and
// checks expiration time. If shard end up empty, then garbage collect release
// him, but only if there is more than one shard in charge (we always have one
// active shard).
// collectGarbage provides garbage collection. It goes through the lookup table and
// checks expiration time. If a shard ends up empty, then garbage collection releases
// it, but only if there is more than one shard in use (there is always at least one active shard).
func (a *AtomicCache) collectGarbage() {
a.Lock()
for k, v := range a.lookup {
60 changes: 60 additions & 0 deletions cache_test.go
@@ -178,6 +178,66 @@ func TestCacheKeepTTL(t *testing.T) {
}
}

func TestCacheExists(t *testing.T) {
cache := New()
key := "exists-key"
data := []byte("exists-data")

// Should not exist before set
if cache.Exists(key) {
t.Errorf("Exists returned true for unset key")
}

// Set and check exists
if err := cache.Set(key, data, 10*time.Second); err != nil {
t.Fatalf("Set error: %s", err)
}
if !cache.Exists(key) {
t.Errorf("Exists returned false for set key")
}

// Delete and check exists
if err := cache.Delete(key); err != nil {
t.Fatalf("Delete error: %s", err)
}
if cache.Exists(key) {
t.Errorf("Exists returned true after Delete")
}

// Never-set key
if cache.Exists("never-existed") {
t.Errorf("Exists returned true for never-set key")
}
}

func TestCacheDelete(t *testing.T) {
cache := New()
key := "del-key"
data := []byte("to-delete")

// Set and then delete
if err := cache.Set(key, data, 0); err != nil {
t.Fatalf("Set error: %s", err)
}
if err := cache.Delete(key); err != nil {
t.Errorf("Delete error: %s", err)
}
// Should not be able to get deleted key
if _, err := cache.Get(key); err == nil {
t.Errorf("Expected error on Get after Delete, got nil")
}

// Deleting again should return ErrNotFound
if err := cache.Delete(key); err != ErrNotFound {
t.Errorf("Expected ErrNotFound on double Delete, got %v", err)
}

// Deleting a never-set key should return ErrNotFound
if err := cache.Delete("never-existed"); err != ErrNotFound {
t.Errorf("Expected ErrNotFound for never-set key, got %v", err)
}
}

func benchmarkCacheNew(recordCount int, b *testing.B) {
b.ReportAllocs()
