This commit is contained in:
unknown
2026-02-04 20:27:13 +08:00
commit 3b042d1dad
9410 changed files with 1488147 additions and 0 deletions

View File

@@ -0,0 +1,414 @@
package ttlcache
import (
	"math"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
	memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
)
// SharedInt64Cache is a process-wide cache for int64 counters.
var SharedInt64Cache = NewBigCache[int64]()
// GCStats holds statistics about the most recent GC pass.
type GCStats struct {
	ExpiredCount int     // number of items that expired in the pass
	TotalCount   int     // number of items inspected in the pass
	GCTime       int64   // pass duration in microseconds
	ExpireRate   float64 // ExpiredCount / TotalCount (0 when TotalCount is 0)
}
// Cache is a sharded TTL cache.
// The maximum lifetime of an entry is 30 * 86400 seconds (30 days).
//
// Data layout:
//
//	Piece1 | Piece2 | Piece3 | ...
//	[ Item1, Item2, ... ] | ...
type Cache[T any] struct {
	isDestroyed    bool        // set by Destroy(); read without a lock by Write/IncreaseInt64
	pieces         []*Piece[T] // the shards; length == countPieces, fixed after construction
	countPieces    uint64      // number of shards; keys are routed by hash % countPieces
	maxItems       int         // total item budget, split evenly across pieces
	maxPiecesPerGC int         // how many pieces a single GC pass may clean
	gcPieceIndex   int         // round-robin cursor used by the non-adaptive GC strategy

	// GC statistics and adaptive tuning
	totalWrites   int64   // incremented atomically on every Write
	totalExpires  int64   // incremented atomically after each GC pass
	lastGCStats   GCStats // stats of the most recent GC pass
	gcStatsLocker sync.Mutex

	// adaptive GC configuration
	gcBaseInterval time.Duration // interval when expire rate is moderate
	gcMinInterval  time.Duration // interval when expire rate is high
	gcMaxInterval  time.Duration // interval when expire rate is low
	adaptiveGC     bool          // enables expire-rate-driven piece selection and intervals
	gcSampleSize   int           // items sampled per piece to estimate expire rate
}
// NewBigCache builds a cache sized from system memory:
// roughly one million items per 2 GB of RAM, with a floor of one million.
func NewBigCache[T any]() *Cache[T] {
	var gb = memutils.SystemMemoryGB() / 2
	if gb < 1 {
		gb = 1
	}
	return NewCache[T](NewMaxItemsOption(gb * 1_000_000))
}
// calculateOptimalPieces picks a shard count based on CPU and memory,
// rounded to a power of two and clamped to [minPieces, maxPieces].
func calculateOptimalPieces(minPieces, maxPieces int) int {
	// start from twice the CPU count
	var shards = runtime.NumCPU() * 2

	// scale up on larger machines
	var memGB = memutils.SystemMemoryGB()
	switch {
	case memGB >= 64:
		shards *= 2
	case memGB >= 32:
		shards = shards * 3 / 2
	}

	// clamp the seed value
	if shards < minPieces {
		shards = minPieces
	} else if shards > maxPieces {
		shards = maxPieces
	}

	// round up to a power of two so the modulo in piece routing is cheap
	var result = 1 << uint(math.Ceil(math.Log2(float64(shards))))

	// the rounding may have pushed us outside the range again
	if result < minPieces {
		result = minPieces
	} else if result > maxPieces {
		// largest power of two not above maxPieces
		result = 1 << uint(math.Floor(math.Log2(float64(maxPieces))))
	}
	return result
}
// NewCache builds a sharded TTL cache.
//
// Options (all optional, in any combination):
//   - PiecesOption:    explicit shard count (otherwise auto-calculated)
//   - MaxItemsOption:  total item budget
//   - MinPiecesOption / MaxPiecesOption: bounds for the auto-calculation
//   - GCConfigOption:  adaptive GC tuning
//
// The new cache registers itself with SharedManager, which drives GC.
func NewCache[T any](opt ...OptionInterface) *Cache[T] {
	var countPieces = 256
	var maxItems = 1_000_000
	var minPieces = 64
	var maxPieces = 1024

	var totalMemory = memutils.SystemMemoryGB()
	if totalMemory < 2 {
		// limit what low-memory hosts may use
		maxItems = 500_000
	} else {
		var delta = totalMemory / 4
		if delta > 0 {
			maxItems *= delta
		}
	}

	// read sizing options; a configured shard count beats auto-calculation
	var piecesFromConfig = 0
	for _, option := range opt {
		if option == nil {
			continue
		}
		switch o := option.(type) {
		case *PiecesOption:
			if o.Count > 0 {
				piecesFromConfig = o.Count
			}
		case PiecesOption: // fix: also accept the option passed by value
			if o.Count > 0 {
				piecesFromConfig = o.Count
			}
		case *MaxItemsOption:
			if o.Count > 0 {
				maxItems = o.Count
			}
		case MaxItemsOption: // fix: also accept the option passed by value
			if o.Count > 0 {
				maxItems = o.Count
			}
		case *MinPiecesOption:
			if o.Count > 0 {
				minPieces = o.Count
			}
		case *MaxPiecesOption:
			if o.Count > 0 {
				maxPieces = o.Count
			}
		case *GCConfigOption:
			// handled in the dedicated loop below
		}
	}

	if piecesFromConfig > 0 {
		countPieces = piecesFromConfig
	} else {
		countPieces = calculateOptimalPieces(minPieces, maxPieces)
	}

	var maxPiecesPerGC = 4
	var numCPU = runtime.NumCPU() / 2
	if numCPU > maxPiecesPerGC {
		maxPiecesPerGC = numCPU
	}

	// GC defaults, overridable via GCConfigOption
	var gcBaseInterval = 2 * time.Second
	var gcMinInterval = 1 * time.Second
	var gcMaxInterval = 10 * time.Second
	var adaptiveGC = true
	var gcSampleSize = 100
	for _, option := range opt {
		if gcOpt, ok := option.(*GCConfigOption); ok && gcOpt != nil {
			if gcOpt.BaseInterval > 0 {
				gcBaseInterval = gcOpt.BaseInterval
			}
			if gcOpt.MinInterval > 0 {
				gcMinInterval = gcOpt.MinInterval
			}
			if gcOpt.MaxInterval > 0 {
				gcMaxInterval = gcOpt.MaxInterval
			}
			adaptiveGC = gcOpt.Adaptive
			if gcOpt.SampleSize > 0 {
				gcSampleSize = gcOpt.SampleSize
			}
		}
	}

	var cache = &Cache[T]{
		countPieces:    uint64(countPieces),
		maxItems:       maxItems,
		maxPiecesPerGC: maxPiecesPerGC,
		gcBaseInterval: gcBaseInterval,
		gcMinInterval:  gcMinInterval,
		gcMaxInterval:  gcMaxInterval,
		adaptiveGC:     adaptiveGC,
		gcSampleSize:   gcSampleSize,
	}

	// Fix: with a small maxItems and many pieces the integer division could
	// yield 0, which Piece.Add treats as "unlimited" but Piece.IncreaseInt64
	// treats as "never add". Guarantee at least one item per piece.
	var perPieceItems = maxItems / countPieces
	if perPieceItems <= 0 {
		perPieceItems = 1
	}
	cache.pieces = make([]*Piece[T], 0, countPieces)
	for i := 0; i < countPieces; i++ {
		cache.pieces = append(cache.pieces, NewPiece[T](perPieceItems))
	}

	// register with the manager so the shared ticker drives GC
	SharedManager.Add(cache)

	return cache
}
// Write stores value under key until expiresAt (unix seconds).
// Already-expired timestamps are rejected; timestamps further than 30 days
// out are clamped. Returns false when the cache is destroyed, the timestamp
// is invalid, or the target piece is full.
func (this *Cache[T]) Write(key string, value T, expiresAt int64) (ok bool) {
	if this.isDestroyed {
		return false
	}

	var now = fasttime.Now().Unix()
	if expiresAt <= now {
		return false
	}
	if limit := now + 30*86400; expiresAt > limit {
		expiresAt = limit
	}

	var hash = HashKeyString(key)

	// bookkeeping for GC statistics
	atomic.AddInt64(&this.totalWrites, 1)

	return this.pieces[hash%this.countPieces].Add(hash, &Item[T]{
		Value:     value,
		expiresAt: expiresAt,
	})
}
// IncreaseInt64 atomically adds delta to the int64 counter stored under key,
// creating it when absent. When extend is true the entry's expiry is moved to
// expiresAt. Returns the new counter value, or the zero value of T when the
// cache is destroyed or expiresAt is already in the past.
func (this *Cache[T]) IncreaseInt64(key string, delta T, expiresAt int64, extend bool) T {
	// Fix: the previous code returned any(0).(T), which panics for T = int64
	// (the boxed value has dynamic type int, so the assertion to int64 fails).
	var zero T

	if this.isDestroyed {
		return zero
	}

	var currentTimestamp = fasttime.Now().Unix()
	if expiresAt <= currentTimestamp {
		return zero
	}

	// clamp the lifetime to 30 days, same as Write
	var maxExpiresAt = currentTimestamp + 30*86400
	if expiresAt > maxExpiresAt {
		expiresAt = maxExpiresAt
	}

	var uint64Key = HashKeyString(key)
	var pieceIndex = uint64Key % this.countPieces
	return this.pieces[pieceIndex].IncreaseInt64(uint64Key, delta, expiresAt, extend)
}
// Read returns the item stored under key, or nil when absent or expired.
func (this *Cache[T]) Read(key string) (item *Item[T]) {
	var hash = HashKeyString(key)
	item = this.pieces[hash%this.countPieces].Read(hash)
	return
}
// Delete removes the item stored under key, if any.
func (this *Cache[T]) Delete(key string) {
	var hash = HashKeyString(key)
	this.pieces[hash%this.countPieces].Delete(hash)
}
// Count returns the total number of items across all pieces
// (expired-but-not-yet-collected items included).
func (this *Cache[T]) Count() (count int) {
	for i := range this.pieces {
		count += this.pieces[i].Count()
	}
	return count
}
// calculateGCInterval returns the delay before the next GC pass.
// With adaptive GC enabled, a high expire rate shortens the interval and a
// low one lengthens it; otherwise the base interval is always used.
func (this *Cache[T]) calculateGCInterval() time.Duration {
	if !this.adaptiveGC {
		return this.gcBaseInterval
	}

	this.gcStatsLocker.Lock()
	var rate = this.lastGCStats.ExpireRate
	this.gcStatsLocker.Unlock()

	switch {
	case rate > 0.1:
		// many items expiring — collect more aggressively
		return this.gcMinInterval
	case rate < 0.01:
		// almost nothing expiring — back off
		return this.gcMaxInterval
	default:
		return this.gcBaseInterval
	}
}
// roundRobinPieces returns the next batch of piece indexes for the plain
// round-robin GC strategy, starting at gcPieceIndex.
func (this *Cache[T]) roundRobinPieces() []int {
	var selected []int
	for i := this.gcPieceIndex; i < this.gcPieceIndex+this.maxPiecesPerGC && i < int(this.countPieces); i++ {
		selected = append(selected, i)
	}
	return selected
}

// selectPiecesForGC picks the pieces the next GC pass should clean.
// With adaptive GC it samples each piece's expire rate and returns the worst
// offenders; otherwise (or when nothing is expired) it falls back to
// round-robin selection.
func (this *Cache[T]) selectPiecesForGC() []int {
	if !this.adaptiveGC {
		return this.roundRobinPieces()
	}

	type pieceExpireInfo struct {
		index      int
		expireRate float64
	}

	// sample each non-empty piece instead of scanning it fully
	var pieceInfos []pieceExpireInfo
	for i, piece := range this.pieces {
		count := piece.Count()
		if count == 0 {
			continue
		}
		expireCount := piece.SampleExpireRate(this.gcSampleSize)
		if expireCount == 0 {
			continue
		}

		// Fix: the denominator must be the number of items actually sampled.
		// Dividing by gcSampleSize underestimated the rate for small pieces.
		sampled := this.gcSampleSize
		if count < sampled {
			sampled = count
		}
		pieceInfos = append(pieceInfos, pieceExpireInfo{
			index:      i,
			expireRate: float64(expireCount) / float64(sampled),
		})
	}

	// nothing expired anywhere: fall back to round-robin
	if len(pieceInfos) == 0 {
		return this.roundRobinPieces()
	}

	// highest expire rate first (replaces the previous O(n²) bubble sort)
	sort.Slice(pieceInfos, func(a, b int) bool {
		return pieceInfos[a].expireRate > pieceInfos[b].expireRate
	})

	maxPieces := this.maxPiecesPerGC
	if maxPieces > len(pieceInfos) {
		maxPieces = len(pieceInfos)
	}
	var selected = make([]int, 0, maxPieces)
	for i := 0; i < maxPieces; i++ {
		selected = append(selected, pieceInfos[i].index)
	}
	return selected
}
// GC runs one garbage-collection pass: it selects a batch of pieces, collects
// their expired items, and records statistics used by the adaptive strategy.
// It is driven by SharedManager's ticker.
func (this *Cache[T]) GC() {
	var startTime = time.Now()
	var totalCount = 0
	var expiredCount = 0

	// pick which pieces to clean this pass
	selectedPieces := this.selectPiecesForGC()

	for _, pieceIndex := range selectedPieces {
		// defensive bound check; selectPiecesForGC should never exceed it
		if pieceIndex >= int(this.countPieces) {
			continue
		}
		piece := this.pieces[pieceIndex]
		count := piece.Count()
		totalCount += count

		expired := piece.GC()
		expiredCount += expired
	}

	// Advance the round-robin cursor.
	// NOTE(review): when adaptiveGC is true and selectPiecesForGC fell back to
	// round-robin, it returns a non-empty slice, so this condition is false and
	// gcPieceIndex never advances — the fallback keeps rescanning the same
	// pieces. Looks unintended; confirm before changing.
	if !this.adaptiveGC || len(selectedPieces) == 0 {
		var index = this.gcPieceIndex
		index += this.maxPiecesPerGC
		if index >= int(this.countPieces) {
			index = 0
		}
		this.gcPieceIndex = index
	}

	// record pass statistics for calculateGCInterval / adaptive selection
	var expireRate float64
	if totalCount > 0 {
		expireRate = float64(expiredCount) / float64(totalCount)
	}
	this.gcStatsLocker.Lock()
	this.lastGCStats = GCStats{
		ExpiredCount: expiredCount,
		TotalCount:   totalCount,
		GCTime:       time.Since(startTime).Microseconds(),
		ExpireRate:   expireRate,
	}
	this.gcStatsLocker.Unlock()

	atomic.AddInt64(&this.totalExpires, int64(expiredCount))
}
// Clean drops every item from every piece; the cache stays usable.
func (this *Cache[T]) Clean() {
	for i := range this.pieces {
		this.pieces[i].Clean()
	}
}
// Destroy unregisters the cache from SharedManager and releases every piece.
// The cache must not be written to afterwards.
func (this *Cache[T]) Destroy() {
	SharedManager.Remove(this)

	this.isDestroyed = true
	for i := range this.pieces {
		this.pieces[i].Destroy()
	}
}

View File

@@ -0,0 +1,329 @@
package ttlcache
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/assert"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
timeutil "github.com/iwind/TeaGo/utils/time"
"runtime"
"runtime/debug"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
)
// TestNewCache exercises basic Write/Read plus a full GC sweep across pieces.
func TestNewCache(t *testing.T) {
	var cache = NewCache[int]()
	cache.Write("a", 1, time.Now().Unix()+3600)
	cache.Write("b", 2, time.Now().Unix()+1) // short-lived, should expire
	cache.Write("c", 1, time.Now().Unix()+3602)
	cache.Write("d", 1, time.Now().Unix()+1) // short-lived, should expire
	for _, piece := range cache.pieces {
		if len(piece.m) > 0 {
			for k, item := range piece.m {
				t.Log(k, "=>", item.Value, item.expiresAt)
			}
		}
	}
	t.Log("a:", cache.Read("a"))

	// only wait long enough to expire b/d in single-test mode
	if testutils.IsSingleTesting() {
		time.Sleep(5 * time.Second)
	}

	// run GC once per piece so every shard gets a chance to be cleaned
	for i := 0; i < len(cache.pieces); i++ {
		cache.GC()
	}

	t.Log("b:", cache.Read("b"))
	t.Log("d:", cache.Read("d"))

	t.Log("left:", cache.Count(), "items")
}
// TestCache_Memory is a manual memory-pressure test: it fills the cache
// concurrently, reports Go GC pause times, deletes half the keys, then idles
// so heap usage can be observed. Only runs in single-test mode.
func TestCache_Memory(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var cache = NewCache[int]()

	var isReady bool
	testutils.StartMemoryStats(t, func() {
		if !isReady {
			return
		}
		t.Log(cache.Count(), "items")
	})

	// scale the workload with available memory
	var count = 1_000_000
	if memutils.SystemMemoryGB() > 4 {
		count = 20_000_000
	}

	// concurrent writers share a single atomic id counter
	var concurrent = runtime.NumCPU()
	var wg = &sync.WaitGroup{}
	wg.Add(concurrent)
	var id int64
	for i := 0; i < concurrent; i++ {
		go func() {
			defer wg.Done()
			for {
				var newId = atomic.AddInt64(&id, 1)
				if newId > int64(count) {
					return
				}
				cache.Write("a"+types.String(newId), 1, time.Now().Unix()+int64(rands.Int(1, 300)))
			}
		}()
	}
	wg.Wait()

	// measure a forced runtime GC with the cache fully populated
	func() {
		var before = time.Now()
		runtime.GC()
		var costSeconds = time.Since(before).Seconds()
		var stats = &debug.GCStats{}
		debug.ReadGCStats(stats)
		t.Log("GC pause:", stats.Pause[0].Seconds()*1000, "ms", "cost:", costSeconds*1000, "ms")
	}()

	isReady = true
	t.Log(cache.Count())
	time.Sleep(10 * time.Second)

	// delete every other key, then idle for observation
	for i := 0; i < count; i++ {
		if i%2 == 0 {
			cache.Delete("a" + strconv.Itoa(i))
		}
	}

	t.Log(cache.Count())
	cache.Count()

	time.Sleep(3600 * time.Second)
}
// TestCache_IncreaseInt64 verifies counter creation, increment with and
// without expiry extension, and interaction with a plain Write.
func TestCache_IncreaseInt64(t *testing.T) {
	var a = assert.NewAssertion(t)

	var cache = NewCache[int64]()
	var unixTime = time.Now().Unix()
	{
		// first increase creates the counter with the given expiry
		cache.IncreaseInt64("a", 1, unixTime+3600, false)
		var item = cache.Read("a")
		t.Log(item)
		a.IsTrue(item.Value == 1)
		a.IsTrue(item.expiresAt == unixTime+3600)
	}
	{
		// extend=true moves the expiry forward
		cache.IncreaseInt64("a", 1, unixTime+3600+1, true)
		var item = cache.Read("a")
		t.Log(item)
		a.IsTrue(item.Value == 2)
		a.IsTrue(item.expiresAt == unixTime+3600+1)
	}
	{
		cache.Write("b", 1, time.Now().Unix()+3600+2)
		t.Log(cache.Read("b"))
	}
	{
		// increase on a key previously set by Write
		cache.IncreaseInt64("b", 1, time.Now().Unix()+3600+3, false)
		t.Log(cache.Read("b"))
	}
}
// TestCache_Read measures read latency over a large populated cache.
// Only runs in single-test mode.
// NOTE(review): PiecesOption is passed by value here, while NewCache's type
// switch matches *PiecesOption — verify the option actually takes effect.
func TestCache_Read(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	runtime.GOMAXPROCS(1)

	var cache = NewCache[int](PiecesOption{Count: 32})
	for i := 0; i < 10_000_000; i++ {
		cache.Write("HELLO_WORLD_"+strconv.Itoa(i), i, time.Now().Unix()+int64(i%10240)+1)
	}

	time.Sleep(10 * time.Second)

	total := 0
	for _, piece := range cache.pieces {
		//t.Log(len(piece.m), "keys")
		total += len(piece.m)
	}
	t.Log(total, "total keys")

	// time 10k point reads
	before := time.Now()
	for i := 0; i < 10_240; i++ {
		_ = cache.Read("HELLO_WORLD_" + strconv.Itoa(i))
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")
}
// TestCache_GC repeatedly runs GC while a background goroutine keeps writing
// a short-lived key, then dumps remaining items. Only runs in single-test mode.
func TestCache_GC(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var cache = NewCache[int](&PiecesOption{Count: 5})
	cache.Write("a", 1, time.Now().Unix()+1)
	cache.Write("b", 2, time.Now().Unix()+2)
	cache.Write("c", 3, time.Now().Unix()+3)
	cache.Write("d", 4, time.Now().Unix()+4)
	cache.Write("e", 5, time.Now().Unix()+10)

	// keep re-adding a short-lived key while GC runs
	go func() {
		for i := 0; i < 1000; i++ {
			cache.Write("f", 1, time.Now().Unix()+1)
			time.Sleep(10 * time.Millisecond)
		}
	}()

	for i := 0; i < 20; i++ {
		cache.GC()
		t.Log("items:", cache.Count())
		if cache.Count() == 0 {
			break
		}
		time.Sleep(1 * time.Second)
	}

	t.Log("now:", time.Now().Unix())
	for _, p := range cache.pieces {
		t.Log("expire list:", p.expiresList.Count(), p.expiresList)
		for k, v := range p.m {
			t.Log(k, v.Value, v.expiresAt)
		}
	}
}
// TestCache_GC2 watches two caches of different shard counts drain to zero
// via the shared manager's background GC. Only runs in single-test mode.
func TestCache_GC2(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	runtime.GOMAXPROCS(1)

	var cache1 = NewCache[int](NewPiecesOption(256))
	for i := 0; i < 10_000_000; i++ {
		cache1.Write(strconv.Itoa(i), i, time.Now().Unix()+10)
	}

	var cache2 = NewCache[int](NewPiecesOption(5))
	for i := 0; i < 1_000_000; i++ {
		cache2.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(0, 20)))
	}

	// poll until both caches are empty or an hour has passed
	for i := 0; i < 3600; i++ {
		t.Log(timeutil.Format("H:i:s"), cache1.Count(), "items", cache2.Count(), "items")
		if cache1.Count() == 0 && cache2.Count() == 0 {
			break
		}
		time.Sleep(1 * time.Second)
	}
}
// TestCacheDestroy checks that Destroy unregisters the cache from SharedManager.
func TestCacheDestroy(t *testing.T) {
	var cache = NewCache[int]()
	t.Log("count:", SharedManager.Count())
	cache.Destroy()
	t.Log("count:", SharedManager.Count())
}

// TestCache_Clean checks that Clean on an empty cache is a no-op.
func TestCache_Clean(t *testing.T) {
	var cache = NewCache[int]()
	cache.Clean()
}

// TestCache_Destroy destroys a populated cache and logs the manager count
// before and after to show deregistration.
func TestCache_Destroy(t *testing.T) {
	var cache = NewCache[int]()
	t.Log(SharedManager.Count())

	for i := 0; i < 1_000; i++ {
		cache.Write("a"+types.String(i), 1, fasttime.Now().Unix()+3600)
	}

	cache.Destroy()

	t.Log(SharedManager.Count())
}
// BenchmarkNewCache measures parallel random reads on a pre-filled cache.
func BenchmarkNewCache(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var cache = NewCache[int](NewPiecesOption(128))
	for i := 0; i < 2_000_000; i++ {
		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(10, 100)))
	}

	b.Log("start reading ...")
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			cache.Read(strconv.Itoa(rands.Int(0, 999999)))
		}
	})
}

// BenchmarkCache_Add measures sequential write throughput.
func BenchmarkCache_Add(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var cache = NewCache[int]()
	for i := 0; i < b.N; i++ {
		cache.Write(strconv.Itoa(i), i, fasttime.Now().Unix()+int64(i%1024))
	}
}

// BenchmarkCache_Add_Parallel measures parallel write throughput over a
// bounded key space (j % 1e6), so keys are overwritten repeatedly.
func BenchmarkCache_Add_Parallel(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var cache = NewCache[int64]()
	var i int64
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var j = atomic.AddInt64(&i, 1)
			cache.Write(types.String(j%1e6), j, fasttime.Now().Unix()+i%1024)
		}
	})
}

// BenchmarkNewCacheGC measures parallel GC passes on a pre-filled cache.
func BenchmarkNewCacheGC(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var cache = NewCache[int](NewPiecesOption(1024))
	for i := 0; i < 3_000_000; i++ {
		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(0, 100)))
	}

	//b.Log(cache.pieces[0].Count())

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			cache.GC()
		}
	})
}

// BenchmarkNewCacheClean measures parallel Clean calls on a pre-filled cache.
func BenchmarkNewCacheClean(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var cache = NewCache[int](NewPiecesOption(128))
	for i := 0; i < 3_000_000; i++ {
		cache.Write(strconv.Itoa(i), i, time.Now().Unix()+int64(rands.Int(10, 100)))
	}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			cache.Clean()
		}
	})
}

View File

@@ -0,0 +1,11 @@
package ttlcache
// Item is a single cache entry: a value plus its expiry timestamp.
type Item[T any] struct {
	Value     T
	expiresAt int64 // unix timestamp (seconds) after which the item is expired
}

// ExpiresAt returns the item's expiry as a unix timestamp in seconds.
func (this *Item[T]) ExpiresAt() int64 {
	return this.expiresAt
}

View File

@@ -0,0 +1,64 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package ttlcache
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"sync"
"time"
)
// SharedManager drives periodic GC for every cache created in this process.
var SharedManager = NewManager()

// GCAble is anything the manager can garbage-collect on a timer.
type GCAble interface {
	GC()
}

// Manager periodically calls GC() on all registered caches.
type Manager struct {
	ticker *time.Ticker
	locker sync.Mutex            // guards cacheMap, and held while GC runs
	cacheMap map[GCAble]zero.Zero // registered caches (used as a set)
}
// NewManager creates a manager whose background goroutine fires GC on all
// registered caches every two seconds.
func NewManager() *Manager {
	var m = &Manager{
		ticker:   time.NewTicker(2 * time.Second),
		cacheMap: make(map[GCAble]zero.Zero),
	}
	goman.New(m.init)
	return m
}
// init runs the GC loop; it blocks until the ticker stops delivering.
// The lock is held for the whole pass so Add/Remove cannot race with GC.
func (this *Manager) init() {
	for range this.ticker.C {
		this.locker.Lock()
		for c := range this.cacheMap {
			c.GC()
		}
		this.locker.Unlock()
	}
}
// Add registers a cache for periodic GC.
func (this *Manager) Add(cache GCAble) {
	this.locker.Lock()
	defer this.locker.Unlock()
	this.cacheMap[cache] = zero.New()
}

// Remove unregisters a cache; its GC will no longer be called.
func (this *Manager) Remove(cache GCAble) {
	this.locker.Lock()
	defer this.locker.Unlock()
	delete(this.cacheMap, cache)
}

// Count reports how many caches are currently registered.
func (this *Manager) Count() int {
	this.locker.Lock()
	defer this.locker.Unlock()
	return len(this.cacheMap)
}

View File

@@ -0,0 +1,84 @@
package ttlcache
import "time"
// OptionInterface is the marker interface implemented by all cache options.
type OptionInterface interface {
}

// PiecesOption sets an explicit shard (piece) count.
type PiecesOption struct {
	Count int
}

// NewPiecesOption returns a PiecesOption with the given shard count.
func NewPiecesOption(count int) *PiecesOption {
	return &PiecesOption{Count: count}
}

// MaxItemsOption sets the cache's total item budget.
type MaxItemsOption struct {
	Count int
}

// NewMaxItemsOption returns a MaxItemsOption with the given budget.
func NewMaxItemsOption(count int) *MaxItemsOption {
	return &MaxItemsOption{Count: count}
}

// MinPiecesOption sets the lower bound for auto-calculated shard counts.
type MinPiecesOption struct {
	Count int
}

// NewMinPiecesOption returns a MinPiecesOption with the given bound.
func NewMinPiecesOption(count int) *MinPiecesOption {
	return &MinPiecesOption{Count: count}
}

// MaxPiecesOption sets the upper bound for auto-calculated shard counts.
type MaxPiecesOption struct {
	Count int
}

// NewMaxPiecesOption returns a MaxPiecesOption with the given bound.
func NewMaxPiecesOption(count int) *MaxPiecesOption {
	return &MaxPiecesOption{Count: count}
}
// GCConfigOption tunes the cache's garbage collection.
type GCConfigOption struct {
	BaseInterval time.Duration // interval at moderate expire rates
	MinInterval  time.Duration // interval at high expire rates
	MaxInterval  time.Duration // interval at low expire rates
	Adaptive     bool          // enable expire-rate-driven GC
	SampleSize   int           // items sampled per piece when estimating expire rate
}

// NewGCConfigOption returns a GCConfigOption pre-filled with the defaults
// used by NewCache.
func NewGCConfigOption() *GCConfigOption {
	return &GCConfigOption{
		BaseInterval: 2 * time.Second,
		MinInterval:  1 * time.Second,
		MaxInterval:  10 * time.Second,
		Adaptive:     true,
		SampleSize:   100,
	}
}
// WithBaseInterval sets the base GC interval and returns the option for chaining.
func (this *GCConfigOption) WithBaseInterval(interval time.Duration) *GCConfigOption {
	this.BaseInterval = interval
	return this
}

// WithMinInterval sets the minimum GC interval and returns the option for chaining.
func (this *GCConfigOption) WithMinInterval(interval time.Duration) *GCConfigOption {
	this.MinInterval = interval
	return this
}

// WithMaxInterval sets the maximum GC interval and returns the option for chaining.
func (this *GCConfigOption) WithMaxInterval(interval time.Duration) *GCConfigOption {
	this.MaxInterval = interval
	return this
}

// WithAdaptive toggles adaptive GC and returns the option for chaining.
func (this *GCConfigOption) WithAdaptive(adaptive bool) *GCConfigOption {
	this.Adaptive = adaptive
	return this
}

// WithSampleSize sets the per-piece sample size and returns the option for chaining.
func (this *GCConfigOption) WithSampleSize(size int) *GCConfigOption {
	this.SampleSize = size
	return this
}

View File

@@ -0,0 +1,198 @@
package ttlcache
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/expires"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"sync"
)
// Piece is one shard of a Cache: a map of items plus an expiry index.
type Piece[T any] struct {
	m           map[uint64]*Item[T] // items keyed by hashed key; guarded by locker
	expiresList *expires.List       // expiry timestamp -> keys index, used by GC
	maxItems    int                 // capacity of this shard
	lastGCTime  int64               // unix second of the last GC pass (accessed without locker)
	locker      sync.RWMutex
}
// NewPiece creates an empty shard holding at most maxItems items.
func NewPiece[T any](maxItems int) *Piece[T] {
	var piece = &Piece[T]{
		m:           make(map[uint64]*Item[T]),
		expiresList: expires.NewSingletonList(),
		maxItems:    maxItems,
	}
	return piece
}
// Add stores item under key, overwriting any previous entry.
// Returns false only when the shard is full AND the key is not already
// present (overwriting an existing key never grows the map).
//
// Fixes over the previous version:
//   - the capacity check is now done under the write lock (it was a
//     RLock-check followed by a separate Lock-insert, so concurrent writers
//     could exceed maxItems);
//   - overwriting a key with the same expiry used to silently DISCARD the
//     new value; now the value is always stored.
func (this *Piece[T]) Add(key uint64, item *Item[T]) (ok bool) {
	this.locker.Lock()
	oldItem, exists := this.m[key]
	if !exists && this.maxItems > 0 && len(this.m) >= this.maxItems {
		this.locker.Unlock()
		return false
	}
	this.m[key] = item
	this.locker.Unlock()

	// same expiry as before: the expires list already tracks this key
	if exists && oldItem.expiresAt == item.expiresAt {
		return true
	}

	this.expiresList.Add(key, item.expiresAt)
	return true
}
// IncreaseInt64 adds delta to the int64 counter stored under key, creating it
// (subject to maxItems) when absent or expired. When extend is true the
// item's own expiry is also moved to expiresAt.
//
// Fix: the previous version unconditionally assigned the computed result to
// item.Value, so for any T other than int64 the stored value was silently
// zeroed. The value is now only rewritten when both the stored value and the
// delta really are int64.
//
// NOTE(review): when extend is false the expires-list entry is still advanced
// to expiresAt while item.expiresAt keeps its old value — this mirrors the
// original behavior; confirm it is intended.
func (this *Piece[T]) IncreaseInt64(key uint64, delta T, expiresAt int64, extend bool) (result T) {
	this.locker.Lock()
	item, ok := this.m[key]
	if ok && item.expiresAt > fasttime.Now().Unix() {
		if int64Value, isInt64 := any(item.Value).(int64); isInt64 {
			if int64Delta, deltaIsInt64 := any(delta).(int64); deltaIsInt64 {
				result = any(int64Value + int64Delta).(T)
				item.Value = result
			}
		}
		if extend {
			item.expiresAt = expiresAt
		}
		this.expiresList.Add(key, expiresAt)
	} else if len(this.m) < this.maxItems {
		result = delta
		this.m[key] = &Item[T]{
			Value:     delta,
			expiresAt: expiresAt,
		}
		this.expiresList.Add(key, expiresAt)
	}
	this.locker.Unlock()
	return
}
// Delete removes the item stored under key along with its expiry record.
func (this *Piece[T]) Delete(key uint64) {
	this.expiresList.Remove(key)

	this.locker.Lock()
	defer this.locker.Unlock()
	delete(this.m, key)
}
// Read returns the item stored under key, or nil when it is absent or its
// expiry timestamp has already passed.
func (this *Piece[T]) Read(key uint64) (item *Item[T]) {
	this.locker.RLock()
	defer this.locker.RUnlock()

	item = this.m[key]
	if item == nil || item.expiresAt >= fasttime.Now().Unix() {
		return item
	}
	// present but expired: hide it from readers until GC collects it
	return nil
}
// Count returns the number of items currently stored in this shard,
// including expired items that GC has not yet collected.
func (this *Piece[T]) Count() (count int) {
	this.locker.RLock()
	defer this.locker.RUnlock()
	return len(this.m)
}
// SampleExpireRate inspects up to sampleSize items (in map-iteration order,
// i.e. pseudo-random) and returns how many of them are already expired.
// Sampling avoids a full scan of the shard.
func (this *Piece[T]) SampleExpireRate(sampleSize int) int {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var now = fasttime.Now().Unix()
	var expired, checked = 0, 0
	for _, item := range this.m {
		if checked >= sampleSize {
			break
		}
		checked++
		if item.expiresAt <= now {
			expired++
		}
	}
	return expired
}
// GC collects items whose expiry falls between the previous pass and now,
// and returns the number of items removed. At most one pass per second runs;
// the first pass looks back one hour.
//
// Fix: the previous version copied the expired keys into a fresh
// expires.ItemMap only to iterate it again for deletion; the keys are now
// deleted directly under a single write lock.
//
// NOTE(review): lastGCTime is read and written without the locker; this is
// only safe while GC is invoked from a single goroutine (the manager).
func (this *Piece[T]) GC() int {
	var currentTime = fasttime.Now().Unix()

	// rate-limit: skip if a pass ran within the last second
	if this.lastGCTime > 0 && currentTime-this.lastGCTime < 1 {
		return 0
	}
	if this.lastGCTime == 0 {
		this.lastGCTime = currentTime - 3600
	}

	var minTime = this.lastGCTime
	if minTime > currentTime {
		// clock went backwards: restart from the current second
		minTime = currentTime
	}

	// collect expired keys second by second, without holding the map lock
	var expiredKeys []uint64
	for i := minTime; i <= currentTime; i++ {
		for key := range this.expiresList.GC(i) {
			expiredKeys = append(expiredKeys, key)
		}
	}

	// delete everything in one short critical section
	if len(expiredKeys) > 0 {
		this.locker.Lock()
		for _, key := range expiredKeys {
			delete(this.m, key)
		}
		this.locker.Unlock()
	}

	this.lastGCTime = currentTime
	return len(expiredKeys)
}
// Clean drops every item and resets the expiry index; the shard stays usable.
func (this *Piece[T]) Clean() {
	this.locker.Lock()
	this.m = make(map[uint64]*Item[T])
	this.locker.Unlock()

	this.expiresList.Clean()
}
// Destroy releases the shard's storage. The shard must not be used afterwards.
// Fix: the previous version called expiresList.Clean() twice — once inside
// and once after the critical section; the redundant second call is removed.
func (this *Piece[T]) Destroy() {
	this.locker.Lock()
	this.m = nil
	this.expiresList.Clean()
	this.locker.Unlock()
}
// gcItemMap removes every key listed in itemMap from the shard's storage.
func (this *Piece[T]) gcItemMap(itemMap expires.ItemMap) {
	this.locker.Lock()
	defer this.locker.Unlock()
	for key := range itemMap {
		delete(this.m, key)
	}
}

View File

@@ -0,0 +1,70 @@
package ttlcache
import (
"github.com/iwind/TeaGo/rands"
"testing"
"time"
)
// TestPiece_Add exercises basic add/delete/read on a single shard.
func TestPiece_Add(t *testing.T) {
	piece := NewPiece[int](10)
	piece.Add(1, &Item[int]{expiresAt: time.Now().Unix() + 3600})
	piece.Add(2, &Item[int]{})
	piece.Add(3, &Item[int]{})
	piece.Delete(3)
	for key, item := range piece.m {
		t.Log(key, item.Value)
	}
	t.Log(piece.Read(1))
}

// TestPiece_Add_Same adds the same key twice with identical expiry.
func TestPiece_Add_Same(t *testing.T) {
	var piece = NewPiece[int](10)
	piece.Add(1, &Item[int]{expiresAt: time.Now().Unix() + 3600})
	piece.Add(1, &Item[int]{expiresAt: time.Now().Unix() + 3600})
	for key, item := range piece.m {
		t.Log(key, item.Value)
	}
	t.Log(piece.Read(1))
}

// TestPiece_MaxItems verifies that a shard never grows past its capacity.
func TestPiece_MaxItems(t *testing.T) {
	piece := NewPiece[int](10)
	for i := 0; i < 1000; i++ {
		piece.Add(uint64(i), &Item[int]{expiresAt: time.Now().Unix() + 3600})
	}
	t.Log(len(piece.m))
}
// TestPiece_GC adds short-lived items, waits past their expiry, and checks
// that a GC pass removes them.
func TestPiece_GC(t *testing.T) {
	piece := NewPiece[int](10)
	piece.Add(1, &Item[int]{Value: 1, expiresAt: time.Now().Unix() + 1})
	piece.Add(2, &Item[int]{Value: 2, expiresAt: time.Now().Unix() + 1})
	piece.Add(3, &Item[int]{Value: 3, expiresAt: time.Now().Unix() + 1})
	t.Log("before gc ===")
	for key, item := range piece.m {
		t.Log(key, item.Value)
	}
	time.Sleep(1 * time.Second)
	piece.GC()
	t.Log("after gc ===")
	for key, item := range piece.m {
		t.Log(key, item.Value)
	}
}

// TestPiece_GC2 measures a GC pass over a shard with 10k randomly-expiring
// items (capacity 10, so most adds are rejected).
func TestPiece_GC2(t *testing.T) {
	piece := NewPiece[int](10)
	for i := 0; i < 10_000; i++ {
		piece.Add(uint64(i), &Item[int]{Value: 1, expiresAt: time.Now().Unix() + int64(rands.Int(1, 10))})
	}
	time.Sleep(1 * time.Second)

	before := time.Now()
	piece.GC()
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log(piece.Count())
}

View File

@@ -0,0 +1,11 @@
package ttlcache
import "github.com/cespare/xxhash/v2"
// HashKeyBytes hashes a byte-slice key to the uint64 used for piece routing
// and map storage (xxHash64).
func HashKeyBytes(key []byte) uint64 {
	return xxhash.Sum64(key)
}

// HashKeyString hashes a string key to the uint64 used for piece routing
// and map storage (xxHash64), without allocating a byte slice.
func HashKeyString(key string) uint64 {
	return xxhash.Sum64String(key)
}

View File

@@ -0,0 +1,52 @@
package ttlcache_test
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/cespare/xxhash/v2"
"runtime"
"strconv"
"testing"
)
// TestHashCollision verifies that HashKeyString produces no collisions over
// sequential decimal keys (100M in single-test mode, 1k otherwise).
func TestHashCollision(t *testing.T) {
	var m = map[uint64]zero.Zero{}

	var count = 1_000
	if testutils.IsSingleTesting() {
		count = 100_000_000
	}

	for i := 0; i < count; i++ {
		var k = ttlcache.HashKeyString(strconv.Itoa(i))
		_, ok := m[k]
		if ok {
			t.Fatal("collision at", i)
		}
		m[k] = zero.New()
	}
	t.Log(len(m), "elements")
}
// BenchmarkHashKey_Bytes measures hashing a 66-byte key via the []byte API.
func BenchmarkHashKey_Bytes(b *testing.B) {
	runtime.GOMAXPROCS(1)

	for i := 0; i < b.N; i++ {
		ttlcache.HashKeyBytes([]byte("HELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLD"))
	}
}

// BenchmarkHashKey_String measures hashing the same key via the string API.
func BenchmarkHashKey_String(b *testing.B) {
	runtime.GOMAXPROCS(1)

	for i := 0; i < b.N; i++ {
		ttlcache.HashKeyString("HELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLD")
	}
}

// BenchmarkHashKey_XXHash measures calling xxhash directly as a baseline.
func BenchmarkHashKey_XXHash(b *testing.B) {
	runtime.GOMAXPROCS(1)

	for i := 0; i < b.N; i++ {
		xxhash.Sum64String("HELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLD")
	}
}