Initial commit (code only without large binaries)

This commit is contained in:
robin
2026-02-15 18:58:44 +08:00
commit 35df75498f
9442 changed files with 1495866 additions and 0 deletions

View File

@@ -0,0 +1,302 @@
package accesslogs
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"gopkg.in/natefinch/lumberjack.v2"
)
var (
sharedFileWriter *FileWriter
sharedOnce sync.Once
)
// SharedFileWriter returns the global local-log-file writer (singleton).
// The instance is created lazily, exactly once, via sync.Once.
func SharedFileWriter() *FileWriter {
	sharedOnce.Do(func() {
		sharedFileWriter = NewFileWriter()
	})
	return sharedFileWriter
}
const (
defaultLogDir = "/var/log/edge/edge-node"
envLogDir = "EDGE_LOG_DIR"
)
// FileWriter writes access/WAF/error logs to local files as JSON Lines so
// Fluent Bit can tail and collect them. File rotation is handled internally
// by lumberjack.
type FileWriter struct {
	dir          string                               // target directory for the log files
	mu           sync.Mutex                           // guards files / dir / rotateConfig / inited
	files        map[string]*lumberjack.Logger        // access.log, waf.log, error.log
	rotateConfig *serverconfigs.AccessLogRotateConfig // rotation settings shared by all files
	inited       bool                                 // whether directory and files were prepared
}
// NewFileWriter creates a local log file writer. The directory is taken from
// the EDGE_LOG_DIR environment variable (falling back to the built-in
// default) and rotation starts from the default rotate config.
func NewFileWriter() *FileWriter {
	var writer = &FileWriter{
		files:        map[string]*lumberjack.Logger{},
		rotateConfig: serverconfigs.NewDefaultAccessLogRotateConfig(),
	}
	writer.dir = resolveDefaultLogDir()
	return writer
}
// resolveDefaultLogDir returns the log directory named by the EDGE_LOG_DIR
// environment variable, or the built-in default when it is empty or blank.
func resolveDefaultLogDir() string {
	var dir = strings.TrimSpace(os.Getenv(envLogDir))
	if len(dir) > 0 {
		return dir
	}
	return defaultLogDir
}
// resolveDirFromPolicyPath derives a log directory from an access-log policy
// path. A path ending with a slash or backslash is itself a directory; a path
// whose last element looks like a file name (contains a dot) or contains a
// template variable ("${") is reduced to its parent directory; anything else
// is used as a directory as-is. An empty path yields "".
func resolveDirFromPolicyPath(policyPath string) string {
	policyPath = strings.TrimSpace(policyPath)
	if len(policyPath) == 0 {
		return ""
	}
	if strings.HasSuffix(policyPath, "/") || strings.HasSuffix(policyPath, "\\") {
		return filepath.Clean(policyPath)
	}
	var baseName = filepath.Base(policyPath)
	if strings.Contains(baseName, ".") || strings.Contains(baseName, "${") {
		// looks like a file name or a templated file name: use the parent dir
		return filepath.Clean(filepath.Dir(policyPath))
	}
	return filepath.Clean(policyPath)
}
// Dir returns the currently configured log directory.
// NOTE(review): read without holding mu while SetDir may update dir
// concurrently — confirm callers tolerate a stale value.
func (w *FileWriter) Dir() string {
	return w.dir
}

// SetDirByPolicyPath updates the directory from a shared log-policy path.
// An empty or unresolvable path falls back to EDGE_LOG_DIR / the default.
func (w *FileWriter) SetDirByPolicyPath(policyPath string) {
	dir := resolveDirFromPolicyPath(policyPath)
	w.SetDir(dir)
}
// SetDir switches the log directory and drops every open file handle so the
// writers are re-created lazily under the new directory. A blank dir falls
// back to the environment/default directory. No-op when unchanged.
func (w *FileWriter) SetDir(dir string) {
	if len(strings.TrimSpace(dir)) == 0 {
		dir = resolveDefaultLogDir()
	}

	w.mu.Lock()
	defer w.mu.Unlock()

	if w.dir == dir {
		return
	}

	// close and forget all current writers; init() will rebuild them
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
		}
		w.files[name] = nil
	}
	w.inited = false
	w.dir = dir
}
// SetRotateConfig updates the rotation settings and forces the writers to be
// rebuilt on next use so the new settings take effect. A nil config now falls
// back to the default rotate config instead of panicking on Normalize.
func (w *FileWriter) SetRotateConfig(config *serverconfigs.AccessLogRotateConfig) {
	if config == nil {
		config = serverconfigs.NewDefaultAccessLogRotateConfig()
	}
	normalized := config.Normalize()

	w.mu.Lock()
	defer w.mu.Unlock()

	if equalRotateConfig(w.rotateConfig, normalized) {
		return
	}

	// close current writers; init() re-creates them with the new settings
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
		}
		w.files[name] = nil
	}
	w.inited = false
	w.rotateConfig = normalized
}
// IsEnabled reports whether local file logging is on; a non-empty directory
// is treated as enabled.
func (w *FileWriter) IsEnabled() bool {
	return w.dir != ""
}

// EnsureInit pre-creates the log directory and empty log files at startup so
// Fluent Bit can start tailing immediately, without waiting for the first
// access-log entry. It is a no-op when no directory is configured.
func (w *FileWriter) EnsureInit() error {
	if w.dir == "" {
		return nil
	}
	return w.init()
}
// init makes sure the directory exists and opens the three log files. It only
// does work on first use, or after SetDir/SetRotateConfig/Reopen invalidated
// the current writers.
func (w *FileWriter) init() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.inited && len(w.files) > 0 {
		return nil
	}
	if w.dir == "" {
		return nil
	}
	if err := os.MkdirAll(w.dir, 0755); err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "mkdir log dir failed: "+err.Error())
		return err
	}
	// open any writer that is still missing
	for _, name := range []string{"access.log", "waf.log", "error.log"} {
		if w.files[name] != nil {
			continue
		}
		w.files[name] = w.newLogger(name)
	}
	w.inited = true
	return nil
}
// newLogger builds a lumberjack writer for fileName under the current
// directory, applying the normalized rotation settings.
// NOTE(review): Compress/LocalTime are dereferenced without nil checks —
// assumes Normalize always fills these pointers; confirm.
func (w *FileWriter) newLogger(fileName string) *lumberjack.Logger {
	rotateConfig := w.rotateConfig.Normalize()
	return &lumberjack.Logger{
		Filename:   filepath.Join(w.dir, fileName),
		MaxSize:    rotateConfig.MaxSizeMB,
		MaxBackups: rotateConfig.MaxBackups,
		MaxAge:     rotateConfig.MaxAgeDays,
		Compress:   *rotateConfig.Compress,
		LocalTime:  *rotateConfig.LocalTime,
	}
}
// Write appends one access-log entry, routed by log_type, to the matching
// file (access.log / waf.log / error.log) as a single JSON line.
//
// Fix: w.dir was previously read without the mutex while SetDir mutates it
// under the mutex — a data race. The directory is now snapshotted under the
// lock; the racy unlocked len(w.files) check was dropped because the later
// nil-writer check already covers the "no files" case.
func (w *FileWriter) Write(l *pb.HTTPAccessLog, clusterId int64) {
	w.mu.Lock()
	dir := w.dir
	w.mu.Unlock()
	if dir == "" {
		return
	}
	if err := w.init(); err != nil {
		return
	}

	ingest, logType := FromHTTPAccessLog(l, clusterId)
	line, err := json.Marshal(ingest)
	if err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "marshal ingest log: "+err.Error())
		return
	}

	var fileName string
	switch logType {
	case LogTypeWAF:
		fileName = "waf.log"
	case LogTypeError:
		fileName = "error.log"
	default:
		fileName = "access.log"
	}

	w.mu.Lock()
	file := w.files[fileName]
	w.mu.Unlock()
	if file == nil {
		return
	}

	// lumberjack.Logger.Write is itself safe for concurrent use
	_, err = file.Write(append(line, '\n'))
	if err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "write "+fileName+" failed: "+err.Error())
	}
}
// WriteBatch writes many entries at once, taking the lock only briefly to
// grab the three writers (reduces lock contention versus per-entry Write).
//
// Fix: w.dir (and len(w.files)) were read without the mutex while SetDir
// mutates them under the mutex — a data race. The directory is now read
// under the lock and the unlocked map-length check removed; the existing
// nil-writer checks cover the "no files" case.
func (w *FileWriter) WriteBatch(logs []*pb.HTTPAccessLog, clusterId int64) {
	if len(logs) == 0 {
		return
	}
	w.mu.Lock()
	dir := w.dir
	w.mu.Unlock()
	if dir == "" {
		return
	}
	if err := w.init(); err != nil {
		return
	}

	w.mu.Lock()
	accessFile := w.files["access.log"]
	wafFile := w.files["waf.log"]
	errorFile := w.files["error.log"]
	w.mu.Unlock()
	if accessFile == nil && wafFile == nil && errorFile == nil {
		return
	}

	for _, logItem := range logs {
		ingest, logType := FromHTTPAccessLog(logItem, clusterId)
		line, err := json.Marshal(ingest)
		if err != nil {
			// skip entries that cannot be marshalled; the rest still get written
			continue
		}
		line = append(line, '\n')
		var file *lumberjack.Logger
		switch logType {
		case LogTypeWAF:
			file = wafFile
		case LogTypeError:
			file = errorFile
		default:
			file = accessFile
		}
		if file != nil {
			_, _ = file.Write(line)
		}
	}
}
// Reopen closes and rebuilds all log writers (kept for SIGHUP-style log
// rotation compatibility).
func (w *FileWriter) Reopen() error {
	if w.dir == "" {
		return nil
	}
	w.mu.Lock()
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
			w.files[name] = nil
		}
	}
	w.inited = false
	// unlock before init(), which takes the same lock
	w.mu.Unlock()
	return w.init()
}
// Close closes every open log file and returns the last close error, if any.
// The writer stays usable afterwards: the next Write re-initializes the files.
func (w *FileWriter) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	var lastErr error
	for name, file := range w.files {
		if file != nil {
			if err := file.Close(); err != nil {
				lastErr = err
				remotelogs.Error("ACCESS_LOG_FILE", fmt.Sprintf("close %s: %v", name, err))
			}
			w.files[name] = nil
		}
	}
	w.inited = false
	return lastErr
}
// equalRotateConfig reports whether two rotate configs are equivalent.
// Fix: the Compress/LocalTime *bool fields are now compared nil-safely
// instead of being dereferenced unconditionally (which panicked on nil).
func equalRotateConfig(left *serverconfigs.AccessLogRotateConfig, right *serverconfigs.AccessLogRotateConfig) bool {
	if left == nil || right == nil {
		return left == right
	}
	return left.MaxSizeMB == right.MaxSizeMB &&
		left.MaxBackups == right.MaxBackups &&
		left.MaxAgeDays == right.MaxAgeDays &&
		equalBoolPtr(left.Compress, right.Compress) &&
		equalBoolPtr(left.LocalTime, right.LocalTime)
}

// equalBoolPtr compares two *bool values; two nil pointers are equal.
func equalBoolPtr(a *bool, b *bool) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}

View File

@@ -0,0 +1,137 @@
// Package accesslogs 提供边缘节点访问日志落盘JSON Lines供 Fluent Bit 采集写入 ClickHouse。
package accesslogs
import (
"encoding/json"
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
)
// LogType values match the log_type used by Fluent Bit / logs_ingest.
const (
	LogTypeAccess = "access"
	LogTypeWAF    = "waf"
	LogTypeError  = "error"
)

// maxBodyLen caps request/response bodies written to disk (bytes); longer
// bodies are truncated so a single log line cannot grow unbounded.
const maxBodyLen = 512 * 1024
// IngestLog is the single-line JSON structure written to disk; its fields
// align with the design document and the ClickHouse logs_ingest table columns.
type IngestLog struct {
	Timestamp           int64  `json:"timestamp"` // unix seconds
	NodeId              int64  `json:"node_id"`
	ClusterId           int64  `json:"cluster_id"`
	ServerId            int64  `json:"server_id"`
	Host                string `json:"host"`
	IP                  string `json:"ip"`
	Method              string `json:"method"`
	Path                string `json:"path"` // request URI, including query string
	Status              int32  `json:"status"`
	BytesIn             int64  `json:"bytes_in"`
	BytesOut            int64  `json:"bytes_out"`
	CostMs              int64  `json:"cost_ms"` // request time in milliseconds
	UA                  string `json:"ua"`
	Referer             string `json:"referer"`
	LogType             string `json:"log_type"` // access / waf / error
	TraceId             string `json:"trace_id,omitempty"`
	FirewallPolicyId    int64  `json:"firewall_policy_id,omitempty"`
	FirewallRuleGroupId int64  `json:"firewall_rule_group_id,omitempty"`
	FirewallRuleSetId   int64  `json:"firewall_rule_set_id,omitempty"`
	FirewallRuleId      int64  `json:"firewall_rule_id,omitempty"`
	RequestHeaders      string `json:"request_headers,omitempty"` // JSON object string
	RequestBody         string `json:"request_body,omitempty"`
	ResponseHeaders     string `json:"response_headers,omitempty"` // JSON object string
	ResponseBody        string `json:"response_body,omitempty"`
}
// stringsMapToJSON flattens a map[string]*pb.Strings into a JSON object
// string for on-disk storage. Only the FIRST value of each key is kept, so
// multi-value headers lose their additional values. Returns "" when nothing
// remains to encode.
func stringsMapToJSON(m map[string]*pb.Strings) string {
	if len(m) == 0 {
		return ""
	}
	out := make(map[string]string, len(m))
	for k, v := range m {
		if v != nil && len(v.Values) > 0 {
			out[k] = v.Values[0]
		}
	}
	if len(out) == 0 {
		return ""
	}
	// marshalling a map[string]string cannot fail; error deliberately ignored
	b, _ := json.Marshal(out)
	return string(b)
}
// truncateBody converts b to a string limited to maxBodyLen bytes so one log
// line cannot grow unbounded.
// Fix: truncate the byte slice BEFORE the []byte→string conversion — the old
// code copied the entire (possibly multi-megabyte) body into a string first
// and then threw most of it away.
// NOTE(review): the cut is byte-based and may split a multi-byte UTF-8 rune.
func truncateBody(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	if len(b) > maxBodyLen {
		b = b[:maxBodyLen]
	}
	return string(b)
}
// buildRequestBody merges the query string and the request body into the
// single request_body field (avoids adding a new column): when both are
// present they are joined with a newline, query string first. The result is
// truncated to maxBodyLen.
func buildRequestBody(l *pb.HTTPAccessLog) string {
	q := l.GetQueryString()
	body := l.GetRequestBody()
	if len(q) == 0 && len(body) == 0 {
		return ""
	}
	if len(body) == 0 {
		return truncateBody([]byte(q))
	}
	// pre-size: query + '\n' + body
	combined := make([]byte, 0, len(q)+1+len(body))
	combined = append(combined, q...)
	combined = append(combined, '\n')
	combined = append(combined, body...)
	return truncateBody(combined)
}
// FromHTTPAccessLog converts a pb.HTTPAccessLog into an IngestLog and decides
// its log_type with priority waf > error > access: WAF entries are detected
// via FirewallPolicyId > 0, error entries via a non-empty Errors list.
func FromHTTPAccessLog(l *pb.HTTPAccessLog, clusterId int64) (ingest IngestLog, logType string) {
	ingest = IngestLog{
		Timestamp:           l.GetTimestamp(),
		NodeId:              l.GetNodeId(),
		ClusterId:           clusterId,
		ServerId:            l.GetServerId(),
		Host:                l.GetHost(),
		IP:                  l.GetRawRemoteAddr(),
		Method:              l.GetRequestMethod(),
		Path:                l.GetRequestURI(), // RequestURI keeps the query string
		Status:              l.GetStatus(),
		BytesIn:             l.GetRequestLength(),
		BytesOut:            l.GetBytesSent(),
		CostMs:              int64(l.GetRequestTime() * 1000), // seconds → milliseconds
		UA:                  l.GetUserAgent(),
		Referer:             l.GetReferer(),
		TraceId:             l.GetRequestId(),
		FirewallPolicyId:    l.GetFirewallPolicyId(),
		FirewallRuleGroupId: l.GetFirewallRuleGroupId(),
		FirewallRuleSetId:   l.GetFirewallRuleSetId(),
		FirewallRuleId:      l.GetFirewallRuleId(),
		RequestHeaders:      stringsMapToJSON(l.GetHeader()),
		RequestBody:         buildRequestBody(l),
		ResponseHeaders:     stringsMapToJSON(l.GetSentHeader()),
	}
	// fall back to the proxy-visible address when the raw address is missing
	if ingest.IP == "" {
		ingest.IP = l.GetRemoteAddr()
	}
	// the current pb message carries no response body; assign here if the
	// proto is extended later
	// ingest.ResponseBody = ...
	// priority per the design doc: waf > error > access; attack entries are
	// recognized through firewall_rule_id / firewall_policy_id
	if l.GetFirewallPolicyId() > 0 {
		logType = LogTypeWAF
	} else if len(l.GetErrors()) > 0 {
		logType = LogTypeError
	} else {
		logType = LogTypeAccess
	}
	ingest.LogType = logType
	return ingest, logType
}

View File

@@ -0,0 +1,349 @@
package apps
import (
"errors"
"fmt"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
executils "github.com/TeaOSLab/EdgeNode/internal/utils/exec"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/maps"
"github.com/iwind/TeaGo/types"
"github.com/iwind/gosock/pkg/gosock"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
)
// AppCmd implements the shared command-line interface of the edge apps:
// the built-in start/stop/restart/status/version/help commands plus
// user-registered directives.
type AppCmd struct {
	product       string               // product name shown in help/version output
	version       string               // product version
	usages        []string             // usage lines for help output
	options       []*CommandHelpOption // documented command-line options
	appendStrings []string             // extra trailing help text
	directives    []*Directive         // custom sub-command handlers
	sock          *gosock.Sock         // unix socket used to talk to the running process
}

// NewAppCmd creates a command helper bound to the process' control socket.
func NewAppCmd() *AppCmd {
	return &AppCmd{
		sock: gosock.NewTmpSock(teaconst.ProcessName),
	}
}

// CommandHelpOption is a single option line in the help output.
type CommandHelpOption struct {
	Code        string
	Description string
}
// Product sets the product name; returns the receiver for chaining.
func (this *AppCmd) Product(product string) *AppCmd {
	this.product = product
	return this
}

// Version sets the product version; returns the receiver for chaining.
func (this *AppCmd) Version(version string) *AppCmd {
	this.version = version
	return this
}

// Usage appends one usage line to the help output; returns the receiver.
func (this *AppCmd) Usage(usage string) *AppCmd {
	this.usages = append(this.usages, usage)
	return this
}

// Option documents a command-line option in the help output; returns the
// receiver for chaining.
func (this *AppCmd) Option(code string, description string) *AppCmd {
	this.options = append(this.options, &CommandHelpOption{
		Code:        code,
		Description: description,
	})
	return this
}

// Append adds extra text printed after the options in the help output;
// returns the receiver for chaining.
func (this *AppCmd) Append(appendString string) *AppCmd {
	this.appendStrings = append(this.appendStrings, appendString)
	return this
}
// Print writes the full help text (product header, usage lines, aligned
// options and appended notes) to stdout.
//
// Fix: the old code blanked option.Code in place while printing over-long
// options, permanently mutating the stored options — a second Print call
// would render them incorrectly. A local copy is used instead.
func (this *AppCmd) Print() {
	fmt.Println(this.product + " v" + this.version)
	fmt.Println("Usage:")
	for _, usage := range this.usages {
		fmt.Println("   " + usage)
	}
	if len(this.options) > 0 {
		fmt.Println("")
		fmt.Println("Options:")

		// widen the code column (within a cap) to fit the longest option
		var spaces = 20
		var max = 40
		for _, option := range this.options {
			l := len(option.Code)
			if l < max && l > spaces {
				spaces = l + 4
			}
		}

		for _, option := range this.options {
			var code = option.Code
			if len(code) > max {
				// too long for the column: print on its own line, then an
				// empty code cell in front of the description
				fmt.Println("")
				fmt.Println("  " + code)
				code = ""
			}
			fmt.Printf("  %-"+strconv.Itoa(spaces)+"s%s\n", code, ": "+option.Description)
		}
	}
	if len(this.appendStrings) > 0 {
		fmt.Println("")
		for _, s := range this.appendStrings {
			fmt.Println(s)
		}
	}
}
// On registers a custom sub-command: callback runs when the first CLI
// argument equals arg (Run also fires an optional "<arg>:before" directive
// ahead of the built-in commands).
func (this *AppCmd) On(arg string, callback func()) {
	this.directives = append(this.directives, &Directive{
		Arg:      arg,
		Callback: callback,
	})
}
// Run dispatches the command line: built-in commands (version/help/start/
// stop/restart/status) and registered directives are handled and return;
// with no arguments it installs the buffered file log writer and invokes the
// provided main function.
func (this *AppCmd) Run(main func()) {
	// arguments without the program path
	var args = os.Args[1:]
	if len(args) > 0 {
		var mainArg = args[0]

		// fire an optional "<cmd>:before" hook ahead of built-in handling
		this.callDirective(mainArg + ":before")

		switch mainArg {
		case "-v", "version", "-version", "--version":
			this.runVersion()
			return
		case "?", "help", "-help", "h", "-h":
			this.runHelp()
			return
		case "start":
			this.runStart()
			return
		case "stop":
			this.runStop()
			return
		case "restart":
			this.runRestart()
			return
		case "status":
			this.runStatus()
			return
		}

		// look for a matching custom directive
		for _, directive := range this.directives {
			if directive.Arg == mainArg {
				directive.Callback()
				return
			}
		}

		fmt.Println("unknown command '" + mainArg + "'")
		return
	}

	// route logging through the buffered file writer
	var writer = new(LogWriter)
	writer.Init()
	logs.SetWriter(writer)

	// run the actual program
	main()
}
// runVersion prints the product name, version and Go build information.
func (this *AppCmd) runVersion() {
	fmt.Println(this.product+" v"+this.version, "(build: "+runtime.Version(), runtime.GOOS, runtime.GOARCH, teaconst.Tag+")")
}

// runHelp prints the full help text.
func (this *AppCmd) runHelp() {
	this.Print()
}
// runStart launches the product as a detached background process, unless an
// instance is already running (detected via the control socket).
func (this *AppCmd) runStart() {
	var pid = this.getPID()
	if pid > 0 {
		fmt.Println(this.product+" already started, pid:", pid)
		return
	}

	// the child inherits this env var and switches to background logging
	_ = os.Setenv("EdgeBackground", "on")
	var cmd = exec.Command(this.exe())
	configureSysProcAttr(cmd)
	err := cmd.Start()
	if err != nil {
		fmt.Println(this.product+" start failed:", err.Error())
		return
	}

	// create symbolic links (best effort; errors intentionally ignored)
	_ = this.createSymLinks()

	fmt.Println(this.product+" started ok, pid:", cmd.Process.Pid)
}
// runStop stops the running instance: via systemd on Linux (asynchronously,
// since "systemctl stop" may block for a long time), then via a "stop"
// command over the control socket.
// NOTE(review): the success message is printed unconditionally — the process
// may in fact still be shutting down or have ignored the request.
func (this *AppCmd) runStop() {
	var pid = this.getPID()
	if pid == 0 {
		fmt.Println(this.product + " not started yet")
		return
	}

	// ask systemd to stop the service
	if runtime.GOOS == "linux" {
		systemctl, _ := executils.LookPath("systemctl")
		if len(systemctl) > 0 {
			go func() {
				// may take a long time; do not block this process
				_ = exec.Command(systemctl, "stop", teaconst.SystemdServiceName).Run()
			}()
		}
	}

	// if still running, send the stop command directly
	_, _ = this.sock.SendTimeout(&gosock.Command{Code: "stop"}, 1*time.Second)

	fmt.Println(this.product+" stopped ok, pid:", types.String(pid))
}
// runRestart stops the running instance, waits briefly for it to exit, then
// starts a fresh one.
func (this *AppCmd) runRestart() {
	this.runStop()
	time.Sleep(1 * time.Second)
	this.runStart()
}

// runStatus reports whether an instance is running and prints its pid.
func (this *AppCmd) runStatus() {
	var pid = this.getPID()
	if pid == 0 {
		fmt.Println(this.product + " not started yet")
		return
	}
	fmt.Println(this.product + " is running, pid: " + types.String(pid))
}
// getPID asks the running instance for its pid over the control socket.
// It returns 0 when no instance is listening or the query fails.
func (this *AppCmd) getPID() int {
	if !this.sock.IsListening() {
		return 0
	}
	reply, err := this.sock.Send(&gosock.Command{Code: "pid"})
	if err != nil {
		return 0
	}
	return maps.NewMap(reply.Params).GetInt("pid")
}
// ParseOptions splits arguments of the form "--key=value" (or bare "--key")
// into a map from option name to every value seen for it. Leading dashes and
// surrounding spaces are stripped from keys; a missing "=value" part yields
// an empty string.
func (this *AppCmd) ParseOptions(args []string) map[string][]string {
	var result = make(map[string][]string)
	for _, arg := range args {
		var pieces = strings.SplitN(arg, "=", 2)
		var key = strings.TrimSpace(strings.TrimLeft(pieces[0], "- "))
		var value string
		if len(pieces) > 1 {
			value = strings.TrimSpace(pieces[1])
		}
		result[key] = append(result[key], value)
	}
	return result
}
// exe returns the path of the current executable, falling back to
// os.Args[0] when os.Executable fails.
func (this *AppCmd) exe() string {
	var exe, _ = os.Executable()
	if len(exe) == 0 {
		exe = os.Args[0]
	}
	return exe
}
// createSymLinks installs convenience symlinks on Linux:
//   - /usr/bin/<process>     → the current executable
//   - /var/log/<process>.log → <install>/logs/run.log
// Existing links pointing elsewhere are replaced. Failures are collected and
// returned as one newline-joined error; non-Linux systems are a no-op.
func (this *AppCmd) createSymLinks() error {
	if runtime.GOOS != "linux" {
		return nil
	}
	var exe, _ = os.Executable()
	if len(exe) == 0 {
		return nil
	}

	var errorList = []string{}

	// bin
	{
		var target = "/usr/bin/" + teaconst.ProcessName
		old, _ := filepath.EvalSymlinks(target)
		if old != exe {
			_ = os.Remove(target)
			err := os.Symlink(exe, target)
			if err != nil {
				errorList = append(errorList, err.Error())
			}
		}
	}

	// log
	{
		// run.log lives two levels up from the executable (<install>/logs)
		var realPath = filepath.Dir(filepath.Dir(exe)) + "/logs/run.log"
		var target = "/var/log/" + teaconst.ProcessName + ".log"
		old, _ := filepath.EvalSymlinks(target)
		if old != realPath {
			_ = os.Remove(target)
			err := os.Symlink(realPath, target)
			if err != nil {
				errorList = append(errorList, err.Error())
			}
		}
	}

	if len(errorList) > 0 {
		return errors.New(strings.Join(errorList, "\n"))
	}
	return nil
}
// callDirective invokes the first registered directive whose Arg equals code.
// It is a silent no-op when nothing matches or the callback is nil.
func (this *AppCmd) callDirective(code string) {
	for _, directive := range this.directives {
		if directive.Arg != code {
			continue
		}
		if directive.Callback != nil {
			directive.Callback()
		}
		return
	}
}

View File

@@ -0,0 +1,15 @@
//go:build !windows
package apps
import (
"os/exec"
"syscall"
)
func configureSysProcAttr(cmd *exec.Cmd) {
cmd.SysProcAttr = &syscall.SysProcAttr{
Foreground: false,
Setsid: true,
}
}

View File

@@ -0,0 +1,6 @@
package apps
// Directive binds a command-line argument to its handler callback.
type Directive struct {
	Arg      string // the sub-command text to match
	Callback func() // invoked when the sub-command matches
}

View File

@@ -0,0 +1,111 @@
package apps
import (
"github.com/TeaOSLab/EdgeNode/internal/utils"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/files"
timeutil "github.com/iwind/TeaGo/utils/time"
"log"
"os"
"runtime"
"strconv"
"strings"
)
// LogWriter buffers log lines through a channel and appends them to
// logs/run.log asynchronously, truncating the file when it grows too large.
type LogWriter struct {
	fp *os.File    // run.log handle; nil when opening failed
	c  chan string // buffered queue of pending log lines
}
// Init creates the log directory, opens logs/run.log for appending, and
// spawns the goroutine that drains the message channel into the file. Once
// the file exceeds maxFileSize it is truncated and writing starts over.
func (this *LogWriter) Init() {
	// create the log directory if missing
	var dir = files.NewFile(Tea.LogDir())
	if !dir.Exists() {
		err := dir.Mkdir()
		if err != nil {
			log.Println("[LOG]create log dir failed: " + err.Error())
		}
	}

	// open the log file for appending
	var logPath = Tea.LogFile("run.log")
	fp, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Println("[LOG]open log file failed: " + err.Error())
	} else {
		this.fp = fp
	}

	this.c = make(chan string, 1024)

	// drain messages asynchronously
	var maxFileSize int64 = 128 << 20 // truncate the file once it exceeds this size
	if fp != nil {
		goman.New(func() {
			var totalSize int64 = 0
			stat, err := fp.Stat()
			if err == nil {
				totalSize = stat.Size()
			}
			for message := range this.c {
				// NOTE(review): totalSize ignores the timestamp prefix and the
				// trailing newline, so the truncation threshold is approximate
				totalSize += int64(len(message))
				_, err := fp.WriteString(timeutil.Format("Y/m/d H:i:s ") + message + "\n")
				if err != nil {
					log.Println("[LOG]write log failed: " + err.Error())
				} else {
					// truncate when the file has grown too large
					if totalSize > maxFileSize {
						_ = fp.Truncate(0)
						totalSize = 0
					}
				}
			}
		})
	}
}
// Write queues one log line for asynchronous file output. In foreground mode
// (EdgeBackground != "on") the line is also echoed to the console, annotated
// with the caller's file:line while testing. When the queue is full the
// message is silently dropped rather than blocking the caller.
func (this *LogWriter) Write(message string) {
	backgroundEnv, _ := os.LookupEnv("EdgeBackground")
	if backgroundEnv != "on" {
		// resolve caller file and line (testing mode only)
		var file string
		var line int
		if Tea.IsTesting() {
			var callDepth = 3
			var ok bool
			_, file, line, ok = runtime.Caller(callDepth)
			if ok {
				file = utils.RemoveWorkspace(this.packagePath(file))
			}
		}
		if len(file) > 0 {
			log.Println(message + " (" + file + ":" + strconv.Itoa(line) + ")")
		} else {
			log.Println(message)
		}
	}

	// non-blocking enqueue: drop the message instead of stalling the caller
	select {
	case this.c <- message:
	default:
	}
}
// Close closes the log file and the message channel.
// NOTE(review): the file is closed before the channel, so the drain goroutine
// may still attempt writes to a closed file; a Write racing with Close could
// also panic on the closed channel — confirm the shutdown ordering upstream.
func (this *LogWriter) Close() {
	if this.fp != nil {
		_ = this.fp.Close()
	}
	close(this.c)
}
// packagePath shortens an absolute source path to its trailing
// "package/file.go" form for compact console output.
func (this *LogWriter) packagePath(path string) string {
	var pieces = strings.Split(path, "/")
	if len(pieces) < 2 {
		return path
	}
	return strings.Join(pieces[len(pieces)-2:], "/")
}

View File

@@ -0,0 +1,11 @@
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package apps
import teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
// RunMain runs f only in the main process (teaconst.IsMain), so helper or
// forked child processes skip the program entry point.
func RunMain(f func()) {
	if teaconst.IsMain {
		f()
	}
}

View File

@@ -0,0 +1,11 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// Cache-key suffixes. Note that SuffixAll, SuffixCompression and SuffixMethod
// intentionally share the same base value: the concrete suffix is produced by
// appending the encoding / request method to that base.
const (
	SuffixAll         = "@GOEDGE_"        // generic suffix / shared base
	SuffixWebP        = "@GOEDGE_WEBP"    // WebP variant suffix
	SuffixCompression = "@GOEDGE_"        // compression suffix: SuffixCompression + Encoding
	SuffixMethod      = "@GOEDGE_"        // request-method suffix: SuffixMethod + RequestMethod
	SuffixPartial     = "@GOEDGE_partial" // partial (range) cache suffix
)

View File

@@ -0,0 +1,61 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import "errors"
// Frequently used cache errors.
var (
	ErrNotFound                = errors.New("cache not found")
	ErrFileIsWriting           = errors.New("the cache file is updating")
	ErrInvalidRange            = errors.New("invalid range")
	ErrEntityTooLarge          = errors.New("entity too large")
	ErrWritingUnavailable      = errors.New("writing unavailable")
	ErrWritingQueueFull        = errors.New("writing queue full")
	ErrServerIsBusy            = errors.New("server is busy")
	ErrUnexpectedContentLength = errors.New("unexpected content length")
)
// CapacityError reports a cache capacity problem. It is a distinct type so
// callers can detect it (and often ignore it), keeping noisy error floods
// out of the logs.
type CapacityError struct {
	message string
}

// NewCapacityError wraps a message into a *CapacityError.
func NewCapacityError(err string) error {
	return &CapacityError{message: err}
}

// Error implements the error interface.
func (e *CapacityError) Error() string {
	return e.message
}
// CanIgnoreErr reports whether err may safely be ignored: nil errors, a
// known set of transient cache errors, and any CapacityError (wrapped or
// not) are all considered ignorable.
func CanIgnoreErr(err error) bool {
	if err == nil {
		return true
	}
	for _, ignorable := range []error{
		ErrFileIsWriting,
		ErrEntityTooLarge,
		ErrWritingUnavailable,
		ErrWritingQueueFull,
		ErrServerIsBusy,
	} {
		if errors.Is(err, ignorable) {
			return true
		}
	}
	var capacityErr *CapacityError
	return errors.As(err, &capacityErr)
}
// IsCapacityError reports whether err is (or wraps) a *CapacityError.
func IsCapacityError(err error) bool {
	if err == nil {
		return false
	}
	var target *CapacityError
	return errors.As(err, &target)
}

// IsBusyError reports whether err is (or wraps) ErrServerIsBusy.
func IsBusyError(err error) bool {
	if err == nil {
		return false
	}
	return errors.Is(err, ErrServerIsBusy)
}

View File

@@ -0,0 +1,24 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"errors"
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestCanIgnoreErr verifies that ignorable sentinel errors — plain and
// wrapped — and CapacityError values are ignored, while other errors are not.
func TestCanIgnoreErr(t *testing.T) {
	var a = assert.NewAssertion(t)
	a.IsTrue(caches.CanIgnoreErr(caches.ErrFileIsWriting))
	a.IsTrue(caches.CanIgnoreErr(fmt.Errorf("error: %w", caches.ErrFileIsWriting)))
	a.IsTrue(errors.Is(fmt.Errorf("error: %w", caches.ErrFileIsWriting), caches.ErrFileIsWriting))
	a.IsTrue(errors.Is(caches.ErrFileIsWriting, caches.ErrFileIsWriting))
	a.IsTrue(caches.CanIgnoreErr(caches.NewCapacityError("over capacity")))
	a.IsTrue(caches.CanIgnoreErr(fmt.Errorf("error: %w", caches.NewCapacityError("over capacity"))))
	a.IsFalse(caches.CanIgnoreErr(caches.ErrNotFound))
	a.IsFalse(caches.CanIgnoreErr(errors.New("test error")))
}

View File

@@ -0,0 +1,12 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import "github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
// FileDir describes one cache storage directory and its state.
type FileDir struct {
	Path     string               // absolute directory path
	Capacity *shared.SizeCapacity // configured capacity limit, if any
	IsFull   bool                 // whether the directory has reached capacity
	IsSSD    bool                 // whether the directory resides on an SSD (optimization hint)
}

View File

@@ -0,0 +1,8 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// HotItem is a frequently accessed cache key together with its hit count.
type HotItem struct {
	Key  string
	Hits uint32
}

View File

@@ -0,0 +1,59 @@
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"strings"
)
// ItemType tells where a cache item is stored.
type ItemType = int

const (
	ItemTypeFile   ItemType = 1 // on-disk cache item
	ItemTypeMemory ItemType = 2 // in-memory cache item
)

// currentWeek computes the current period index. A plain YW week string is
// not used because the code needs to tell whether two periods are adjacent.
// NOTE(review): despite the name, this divides by 86400 — i.e. it yields a
// DAY number, not a week number; confirm this is intentional.
func currentWeek() int32 {
	return int32(fasttime.Now().Unix() / 86400)
}
// Item is the metadata of one cache entry. The numeric JSON tags keep the
// serialized form compact.
type Item struct {
	Type       ItemType `json:"-"`           // file or memory (not serialized)
	Key        string   `json:"1,omitempty"` // full cache key (URL plus optional suffix)
	ExpiresAt  int64    `json:"2,omitempty"` // unix seconds when the entry expires
	StaleAt    int64    `json:"3,omitempty"` // unix seconds when the stale copy may be purged
	HeaderSize int64    `json:"-"`           // serialized header size in bytes
	BodySize   int64    `json:"4,omitempty"` // body size in bytes
	MetaSize   int64    `json:"-"`           // metadata size in bytes
	Host       string   `json:"-"`           // hostname
	ServerId   int64    `json:"5,omitempty"` // owning server id
	Week       int32    `json:"-"`           // period index (see currentWeek)
	CreatedAt  int64    `json:"6,omitempty"` // unix seconds when created
}
// IsExpired reports whether the item's expiry time has passed.
func (this *Item) IsExpired() bool {
	return this.ExpiresAt < fasttime.Now().Unix()
}

// TotalSize is the full footprint: header + body + metadata plus the key and
// host strings themselves.
func (this *Item) TotalSize() int64 {
	return this.Size() + this.MetaSize + int64(len(this.Key)) + int64(len(this.Host))
}

// Size is the payload size: header + body.
func (this *Item) Size() int64 {
	return this.HeaderSize + this.BodySize
}
// RequestURI extracts the path-and-query portion ("/p?q=...") from the cache
// key, which is expected to look like "scheme://host[:port]/path". It returns
// "" when the key has no scheme or no slash after the host part.
func (this *Item) RequestURI() string {
	var schemeIndex = strings.Index(this.Key, "://")
	if schemeIndex <= 0 {
		return ""
	}
	var rest = this.Key[schemeIndex+3:]
	var slashIndex = strings.Index(rest, "/")
	if slashIndex <= 0 {
		return ""
	}
	return rest[slashIndex:]
}

View File

@@ -0,0 +1,118 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"encoding/json"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
"runtime"
"testing"
"time"
)
// TestItem_Marshal prints the JSON encoding of an empty and a populated Item,
// demonstrating the compact numeric field tags and omitempty behavior.
// Informational only; nothing is asserted beyond marshal success.
func TestItem_Marshal(t *testing.T) {
	{
		var item = &caches.Item{}
		data, err := json.Marshal(item)
		if err != nil {
			t.Fatal(err)
		}
		t.Log(string(data))
	}
	{
		var item = &caches.Item{
			Type:       caches.ItemTypeFile,
			Key:        "https://example.com/index.html",
			ExpiresAt:  fasttime.Now().Unix(),
			HeaderSize: 1 << 10,
			BodySize:   1 << 20,
			MetaSize:   256,
		}
		data, err := json.Marshal(item)
		if err != nil {
			t.Fatal(err)
		}
		t.Log(string(data))
	}
}
// TestItems_Memory logs the heap growth caused by allocating many Item
// values (10M in single-test mode, 100 otherwise). Informational only;
// nothing is asserted.
func TestItems_Memory(t *testing.T) {
	var stat = &runtime.MemStats{}
	runtime.ReadMemStats(stat)
	var memory1 = stat.HeapInuse
	var items = []*caches.Item{}
	var count = 100
	if testutils.IsSingleTesting() {
		count = 10_000_000
	}
	for i := 0; i < count; i++ {
		items = append(items, &caches.Item{
			Key: types.String(i),
		})
	}
	runtime.ReadMemStats(stat)
	var memory2 = stat.HeapInuse
	t.Log(memory1, memory2, (memory2-memory1)/1024/1024, "M")
	// NOTE(review): nothing happens between these two measurements, so the
	// second delta is expected to be ~0 — confirm whether a GC / release step
	// was intended here.
	runtime.ReadMemStats(stat)
	var memory3 = stat.HeapInuse
	t.Log(memory2, memory3, (memory3-memory2)/1024/1024, "M")
	if testutils.IsSingleTesting() {
		time.Sleep(1 * time.Second)
	}
}
// TestItems_Memory2 measures the heap cost of a week-bucketed set layout
// (map[week]map[key]zero.Zero) filled with randomly distributed weeks, then
// logs each bucket's size. Informational only; nothing is asserted.
func TestItems_Memory2(t *testing.T) {
	var stat = &runtime.MemStats{}
	runtime.ReadMemStats(stat)
	var memory1 = stat.HeapInuse
	var items = map[int32]map[string]zero.Zero{}
	var count = 100
	if testutils.IsSingleTesting() {
		count = 10_000_000
	}
	for i := 0; i < count; i++ {
		// random timestamp within ~300 days, bucketed into 7-day weeks
		var week = int32((time.Now().Unix() - int64(86400*rands.Int(0, 300))) / (86400 * 7))
		m, ok := items[week]
		if !ok {
			m = map[string]zero.Zero{}
			items[week] = m
		}
		m[types.String(int64(i)*1_000_000)] = zero.New()
	}
	runtime.ReadMemStats(stat)
	var memory2 = stat.HeapInuse
	t.Log(memory1, memory2, (memory2-memory1)/1024/1024, "M")
	if testutils.IsSingleTesting() {
		time.Sleep(1 * time.Second)
	}
	for w, i := range items {
		t.Log(w, len(i))
	}
}
// TestItem_RequestURI prints RequestURI for a few representative cache keys
// (plain, with port, with query string). Informational only; no assertions.
func TestItem_RequestURI(t *testing.T) {
	for _, u := range []string{
		"https://goedge.cn/hello/world",
		"https://goedge.cn:8080/hello/world",
		"https://goedge.cn/hello/world?v=1&t=123",
	} {
		var item = &caches.Item{Key: u}
		t.Log(u, "=>", item.RequestURI())
	}
}

View File

@@ -0,0 +1,627 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"errors"
"fmt"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/types"
"net"
"net/url"
"os"
"runtime"
"strings"
"time"
)
// SQLiteFileListDB stores the file-cache item list in SQLite, with separate
// read and write connections plus an in-memory hash map for fast existence
// checks.
type SQLiteFileListDB struct {
	dbPath string

	readDB  *dbs.DB // read-only connection pool
	writeDB *dbs.DB // single-connection writer (WAL mode)

	hashMap *SQLiteFileListHashMap // fast in-memory existence index

	itemsTableName string

	isClosed        bool // whether the database has been closed
	isReady         bool // whether initialization finished
	hashMapIsLoaded bool // whether the hash map has been loaded

	// prepared statements over cacheItems
	existsByHashStmt   *dbs.Stmt // existence check by hash
	insertStmt         *dbs.Stmt // insert one item
	insertSQL          string
	selectByHashStmt   *dbs.Stmt // select one item by hash
	selectHashListStmt *dbs.Stmt // page through (id, hash) pairs
	deleteByHashStmt   *dbs.Stmt // delete by hash
	deleteByHashSQL    string
	statStmt           *dbs.Stmt // aggregate statistics
	purgeStmt          *dbs.Stmt // find stale items to purge
	deleteAllStmt      *dbs.Stmt // delete everything
	listOlderItemsStmt *dbs.Stmt // oldest stored items first
}

// NewSQLiteFileListDB creates an unopened file-list database wrapper.
func NewSQLiteFileListDB() *SQLiteFileListDB {
	return &SQLiteFileListDB{
		hashMap: NewSQLiteFileListHashMap(),
	}
}
// Open opens the read and write SQLite connections for dbPath. The writer is
// limited to a single connection (WAL mode); the reader pool is sized by CPU
// count. The page-cache size scales with system memory. When the EdgeRecover
// environment variable is set and the database looks damaged, the staleAt and
// hash indexes are rebuilt.
func (this *SQLiteFileListDB) Open(dbPath string) error {
	this.dbPath = dbPath

	// scale the SQLite cache size with available memory
	var cacheSize = 512
	var memoryGB = memutils.SystemMemoryGB()
	if memoryGB >= 1 {
		cacheSize = 256 * memoryGB
	}

	// write db
	// EXCLUSIVE locking is deliberately NOT used here, otherwise async
	// transactions could fail
	writeDB, err := dbs.OpenWriter("file:" + dbPath + "?cache=private&mode=rwc&_journal_mode=WAL&_sync=" + dbs.SyncMode + "&_cache_size=" + types.String(cacheSize) + "&_secure_delete=FAST")
	if err != nil {
		return fmt.Errorf("open write database failed: %w", err)
	}
	writeDB.SetMaxOpenConns(1)
	this.writeDB = writeDB

	// TODO takes too long, so the database is not compacted for now
	// TODO decide by row count whether to VACUUM
	// TODO note that VACUUM may actually grow the database file
	/**_, err = db.Exec("VACUUM")
	if err != nil {
		return err
	}**/

	// corruption check — opt-in via EdgeRecover because it is slow
	var recoverEnv, _ = os.LookupEnv("EdgeRecover")
	if len(recoverEnv) > 0 && this.shouldRecover() {
		for _, indexName := range []string{"staleAt", "hash"} {
			_, _ = this.writeDB.Exec(`REINDEX "` + indexName + `"`)
		}
	}

	if teaconst.EnableDBStat {
		this.writeDB.EnableStat(true)
	}

	// read db
	readDB, err := dbs.OpenReader("file:" + dbPath + "?cache=private&mode=ro&_journal_mode=WAL&_sync=OFF&_cache_size=" + types.String(cacheSize))
	if err != nil {
		return fmt.Errorf("open read database failed: %w", err)
	}
	readDB.SetMaxOpenConns(runtime.NumCPU())
	this.readDB = readDB

	if teaconst.EnableDBStat {
		this.readDB.EnableStat(true)
	}

	return nil
}
// Init creates the cacheItems table, prepares all frequently used statements,
// and starts loading the hash map in the background. The database is only
// marked ready (isReady) after every preparation step succeeds.
func (this *SQLiteFileListDB) Init() error {
	this.itemsTableName = "cacheItems"

	// create tables
	var err = this.initTables(1)
	if err != nil {
		return fmt.Errorf("init tables failed: %w", err)
	}

	// prepare frequently used statements
	this.existsByHashStmt, err = this.readDB.Prepare(`SELECT "expiredAt" FROM "` + this.itemsTableName + `" INDEXED BY "hash" WHERE "hash"=? AND expiredAt>? LIMIT 1`)
	if err != nil {
		return err
	}

	this.insertSQL = `INSERT INTO "` + this.itemsTableName + `" ("hash", "key", "headerSize", "bodySize", "metaSize", "expiredAt", "staleAt", "host", "serverId", "createdAt") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
	this.insertStmt, err = this.writeDB.Prepare(this.insertSQL)
	if err != nil {
		return err
	}

	this.selectByHashStmt, err = this.readDB.Prepare(`SELECT "key", "headerSize", "bodySize", "metaSize", "expiredAt" FROM "` + this.itemsTableName + `" WHERE "hash"=? LIMIT 1`)
	if err != nil {
		return err
	}

	this.selectHashListStmt, err = this.readDB.Prepare(`SELECT "id", "hash" FROM "` + this.itemsTableName + `" WHERE id>? ORDER BY id ASC LIMIT 2000`)
	if err != nil {
		return err
	}

	this.deleteByHashSQL = `DELETE FROM "` + this.itemsTableName + `" WHERE "hash"=?`
	this.deleteByHashStmt, err = this.writeDB.Prepare(this.deleteByHashSQL)
	if err != nil {
		return err
	}

	this.statStmt, err = this.readDB.Prepare(`SELECT COUNT(*), IFNULL(SUM(headerSize+bodySize+metaSize), 0), IFNULL(SUM(headerSize+bodySize), 0) FROM "` + this.itemsTableName + `"`)
	if err != nil {
		return err
	}

	this.purgeStmt, err = this.readDB.Prepare(`SELECT "hash" FROM "` + this.itemsTableName + `" WHERE staleAt<=? LIMIT ?`)
	if err != nil {
		return err
	}

	this.deleteAllStmt, err = this.writeDB.Prepare(`DELETE FROM "` + this.itemsTableName + `"`)
	if err != nil {
		return err
	}

	this.listOlderItemsStmt, err = this.readDB.Prepare(`SELECT "hash" FROM "` + this.itemsTableName + `" ORDER BY "id" ASC LIMIT ?`)
	if err != nil {
		return err
	}

	this.isReady = true

	// load the hash map asynchronously; callers can work before it finishes
	go this.loadHashMap()

	return nil
}

// IsReady reports whether Init completed successfully.
func (this *SQLiteFileListDB) IsReady() bool {
	return this.isReady
}
// Total returns the number of rows currently stored in the items table.
func (this *SQLiteFileListDB) Total() (int64, error) {
	var row = this.readDB.QueryRow(`SELECT COUNT(*) FROM "` + this.itemsTableName + `"`)
	if err := row.Err(); err != nil {
		return 0, err
	}
	var total int64
	if err := row.Scan(&total); err != nil {
		return 0, err
	}
	return total, nil
}
// AddSync inserts one cache item under the given hash and records the hash in
// the in-memory map. A missing StaleAt defaults to ExpiresAt.
func (this *SQLiteFileListDB) AddSync(hash string, item *Item) error {
	this.hashMap.Add(hash)

	if item.StaleAt == 0 {
		item.StaleAt = item.ExpiresAt
	}

	_, err := this.insertStmt.Exec(hash, item.Key, item.HeaderSize, item.BodySize, item.MetaSize, item.ExpiresAt, item.StaleAt, item.Host, item.ServerId, fasttime.Now().Unix())
	if err != nil {
		return this.WrapError(err)
	}
	return nil
}
// DeleteSync removes the item with the given hash from both the in-memory
// map and the database.
// Fix: the database error is now wrapped via WrapError, matching AddSync, so
// callers get consistent error context from both write paths.
func (this *SQLiteFileListDB) DeleteSync(hash string) error {
	this.hashMap.Delete(hash)

	_, err := this.deleteByHashStmt.Exec(hash)
	if err != nil {
		return this.WrapError(err)
	}
	return nil
}
// ListExpiredItems returns up to count hashes whose staleAt has passed, i.e.
// items eligible for purging. count defaults to 100 when non-positive; an
// unready database yields nothing.
// Fix: rows.Err() is now checked after iteration (the old code silently
// dropped errors that terminated rows.Next early), and rows are closed via
// defer so no path can leak them.
func (this *SQLiteFileListDB) ListExpiredItems(count int) (hashList []string, err error) {
	if !this.isReady {
		return nil, nil
	}

	if count <= 0 {
		count = 100
	}

	rows, err := this.purgeStmt.Query(time.Now().Unix(), count)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()
	for rows.Next() {
		var hash string
		err = rows.Scan(&hash)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, hash)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return hashList, nil
}
// ListLFUItems returns up to count hashes to evict: already-expired items
// first, then — if that is not enough — the oldest stored items as a
// fallback. An unready database yields nothing.
func (this *SQLiteFileListDB) ListLFUItems(count int) (hashList []string, err error) {
	if !this.isReady {
		return nil, nil
	}

	if count <= 0 {
		count = 100
	}

	// prefer expired items
	hashList, err = this.ListExpiredItems(count)
	if err != nil {
		return
	}
	var l = len(hashList)

	// top up with the oldest stored items
	if l < count {
		oldHashList, err := this.listOlderItems(count - l)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, oldHashList...)
	}

	return hashList, nil
}
// ListHashes reads a batch of (id, hash) rows with id greater than
// 'lastId', returning the hashes and the maximum id seen so callers
// can paginate through the whole table.
func (this *SQLiteFileListDB) ListHashes(lastId int64) (hashList []string, maxId int64, err error) {
	rows, err := this.selectHashListStmt.Query(lastId)
	if err != nil {
		return nil, 0, err
	}
	defer func() {
		_ = rows.Close()
	}()

	var id int64
	var hash string
	for rows.Next() {
		err = rows.Scan(&id, &hash)
		if err != nil {
			return nil, 0, err
		}
		maxId = id
		hashList = append(hashList, hash)
	}

	// surface iteration errors that rows.Next() swallowed (the original skipped this check)
	err = rows.Err()
	if err != nil {
		return nil, 0, err
	}
	return hashList, maxId, nil
}
// IncreaseHitAsync is a no-op for the SQLite backend: hit counters are
// not tracked in this implementation.
func (this *SQLiteFileListDB) IncreaseHitAsync(hash string) error {
	return nil
}
// CleanPrefix marks every item whose key starts with 'prefix' as expired,
// batching the UPDATE in chunks of 10000 rows to keep write locks short.
// Items are not deleted outright: staleAt is pushed out so the stale
// copy can still be served until it is purged.
func (this *SQLiteFileListDB) CleanPrefix(prefix string) error {
	if !this.isReady {
		return nil
	}
	var count = int64(10000)
	var unixTime = fasttime.Now().Unix() // only touch rows that exist now, not ones created later
	for {
		// INSTR("key", ?)=1 performs the prefix match; LIMIT bounds each batch
		result, err := this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET expiredAt=0,staleAt=? WHERE id IN (SELECT id FROM "`+this.itemsTableName+`" WHERE expiredAt>0 AND createdAt<=? AND INSTR("key", ?)=1 LIMIT `+types.String(count)+`)`, unixTime+DefaultStaleCacheSeconds, unixTime, prefix)
		if err != nil {
			return this.WrapError(err)
		}
		affectedRows, err := result.RowsAffected()
		if err != nil {
			return err
		}
		// a short batch means nothing is left to update
		if affectedRows < count {
			return nil
		}
	}
}
// CleanMatchKey marks items matching a wildcard key (e.g.
// "https://*.example.com/path") as expired. The (possibly wildcard)
// host is matched with GLOB — excluding one extra subdomain level via
// NOT GLOB — and the key itself with LIKE after escaping, so only the
// first '*' in the key acts as a wildcard.
func (this *SQLiteFileListDB) CleanMatchKey(key string) error {
	if !this.isReady {
		return nil
	}

	// ignore keys that already contain the @GOEDGE_ suffix marker
	if strings.Contains(key, SuffixAll) {
		return nil
	}

	u, err := url.Parse(key)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}

	// escape LIKE meta characters, then turn the first '*' into a '%' wildcard
	var queryKey = strings.ReplaceAll(key, "%", "\\%")
	queryKey = strings.ReplaceAll(queryKey, "_", "\\_")
	queryKey = strings.Replace(queryKey, "*", "%", 1)

	// TODO check performance of this operation on large datasets
	var unixTime = fasttime.Now().Unix() // only touch rows that exist now, not ones created later
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryKey)
	if err != nil {
		return err
	}

	// also expire the derived suffix variants of the key
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryKey+SuffixAll+"%")
	if err != nil {
		return err
	}
	return nil
}
// CleanMatchPrefix marks items matching a wildcard prefix (e.g.
// "https://*.example.com/dir") as expired. Matching mirrors
// CleanMatchKey, with a trailing '%' appended so any key that starts
// with the prefix matches.
func (this *SQLiteFileListDB) CleanMatchPrefix(prefix string) error {
	if !this.isReady {
		return nil
	}

	u, err := url.Parse(prefix)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}

	// escape LIKE meta characters, turn the first '*' into '%', then match as a prefix
	var queryPrefix = strings.ReplaceAll(prefix, "%", "\\%")
	queryPrefix = strings.ReplaceAll(queryPrefix, "_", "\\_")
	queryPrefix = strings.Replace(queryPrefix, "*", "%", 1)
	queryPrefix += "%"

	// TODO check performance of this operation on large datasets
	var unixTime = fasttime.Now().Unix() // only touch rows that exist now, not ones created later
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryPrefix)
	return err
}
// CleanAll removes every item from the items table and resets the
// in-memory hash map.
func (this *SQLiteFileListDB) CleanAll() error {
	if !this.isReady {
		return nil
	}
	if _, err := this.deleteAllStmt.Exec(); err != nil {
		return this.WrapError(err)
	}
	this.hashMap.Clean()
	return nil
}
// Close releases every prepared statement and closes both database
// connections. It is idempotent: repeated calls return nil immediately.
// Statement close errors are ignored; connection close errors are
// collected and returned as one combined error.
func (this *SQLiteFileListDB) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true
	this.isReady = false

	// close prepared statements; nil fields were never prepared
	if this.existsByHashStmt != nil {
		_ = this.existsByHashStmt.Close()
	}
	if this.insertStmt != nil {
		_ = this.insertStmt.Close()
	}
	if this.selectByHashStmt != nil {
		_ = this.selectByHashStmt.Close()
	}
	if this.selectHashListStmt != nil {
		_ = this.selectHashListStmt.Close()
	}
	if this.deleteByHashStmt != nil {
		_ = this.deleteByHashStmt.Close()
	}
	if this.statStmt != nil {
		_ = this.statStmt.Close()
	}
	if this.purgeStmt != nil {
		_ = this.purgeStmt.Close()
	}
	if this.deleteAllStmt != nil {
		_ = this.deleteAllStmt.Close()
	}
	if this.listOlderItemsStmt != nil {
		_ = this.listOlderItemsStmt.Close()
	}

	// close both connections, collecting error messages
	var errStrings []string
	if this.readDB != nil {
		err := this.readDB.Close()
		if err != nil {
			errStrings = append(errStrings, err.Error())
		}
	}
	if this.writeDB != nil {
		err := this.writeDB.Close()
		if err != nil {
			errStrings = append(errStrings, err.Error())
		}
	}
	if len(errStrings) == 0 {
		return nil
	}
	return errors.New("close database failed: " + strings.Join(errStrings, ", "))
}
// WrapError annotates err with the database file path for easier
// debugging; a nil error passes through unchanged.
func (this *SQLiteFileListDB) WrapError(err error) error {
	if err != nil {
		return fmt.Errorf("%w (file: %s)", err, this.dbPath)
	}
	return nil
}
// HashMapIsLoaded reports whether the background hash map load has completed.
func (this *SQLiteFileListDB) HashMapIsLoaded() bool {
	return this.hashMapIsLoaded
}
// initTables creates the items table and its indexes, migrating away
// from obsolete indexes and the old "hits" table. When creation fails
// unexpectedly it drops the table and retries, up to three attempts.
func (this *SQLiteFileListDB) initTables(times int) error {
	{
		// expiredAt - expiration timestamp, used to decide whether an item is expired
		// staleAt - latest timestamp a stale item may live, used for cleanup
		// 'hash' is intentionally NOT declared UNIQUE, to reduce the chance of 'malformed' errors
		_, err := this.writeDB.Exec(`CREATE TABLE IF NOT EXISTS "` + this.itemsTableName + `" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"hash" varchar(32),
"key" varchar(1024),
"tag" varchar(64),
"headerSize" integer DEFAULT 0,
"bodySize" integer DEFAULT 0,
"metaSize" integer DEFAULT 0,
"expiredAt" integer DEFAULT 0,
"staleAt" integer DEFAULT 0,
"createdAt" integer DEFAULT 0,
"host" varchar(128),
"serverId" integer
);
DROP INDEX IF EXISTS "createdAt";
DROP INDEX IF EXISTS "expiredAt";
DROP INDEX IF EXISTS "serverId";
CREATE INDEX IF NOT EXISTS "staleAt"
ON "` + this.itemsTableName + `" (
"staleAt" ASC
);
CREATE INDEX IF NOT EXISTS "hash"
ON "` + this.itemsTableName + `" (
"hash" ASC
);
`)
		if err != nil {
			// ignore errors that are expected after schema evolution
			if strings.Contains(err.Error(), "duplicate column name") {
				err = nil
			}

			// try dropping and recreating the table
			if err != nil {
				if times < 3 {
					_, dropErr := this.writeDB.Exec(`DROP TABLE "` + this.itemsTableName + `"`)
					if dropErr == nil {
						return this.initTables(times + 1)
					}
					return this.WrapError(err)
				}
				return this.WrapError(err)
			}
		}
	}

	// drop the obsolete hits table; failure is ignored on purpose
	{
		_, _ = this.writeDB.Exec(`DROP TABLE "hits"`)
	}

	return nil
}
// listOlderItems returns up to 'count' hashes ordered by ascending id,
// i.e. the oldest inserted items first.
func (this *SQLiteFileListDB) listOlderItems(count int) (hashList []string, err error) {
	rows, err := this.listOlderItemsStmt.Query(count)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()
	for rows.Next() {
		var hash string
		err = rows.Scan(&hash)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, hash)
	}
	// surface iteration errors that rows.Next() swallowed (the original skipped this check)
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return hashList, nil
}
// shouldRecover runs SQLite's integrity check and reports whether the
// database file is damaged and should be recreated.
func (this *SQLiteFileListDB) shouldRecover() bool {
	result, err := this.writeDB.Query("pragma integrity_check;")
	if err != nil {
		// the check itself could not run, so the database is likely damaged;
		// the original code logged the nil result and then dereferenced it,
		// panicking on this path
		logs.Println("integrity check failed: " + err.Error())
		return true
	}
	defer func() {
		_ = result.Close()
	}()

	// "ok" in the first row means the file passed the check
	var shouldRecover = false
	if result.Next() {
		var errString = ""
		_ = result.Scan(&errString)
		if strings.TrimSpace(errString) != "ok" {
			shouldRecover = true
		}
	}
	return shouldRecover
}
// deleteDB removes the database file together with its SQLite
// shared-memory ("-shm") and write-ahead-log ("-wal") sidecar files.
func (this *SQLiteFileListDB) deleteDB() {
	for _, suffix := range []string{"", "-shm", "-wal"} {
		_ = fsutils.Remove(this.dbPath + suffix)
	}
}
// loadHashMap loads every stored hash into the in-memory hash map.
// When loading reveals a closed or corrupted ("malformed") database,
// the database files are deleted and the database is recreated from
// scratch; hashMapIsLoaded stays false in that case.
func (this *SQLiteFileListDB) loadHashMap() {
	this.hashMapIsLoaded = false
	err := this.hashMap.Load(this)
	if err != nil {
		remotelogs.Error("LIST_FILE_DB", "load hash map failed: "+err.Error()+"(file: "+this.dbPath+")")

		// attempt automatic repair
		// TODO try to recover as much of the previous database content as possible
		if strings.Contains(err.Error(), "database is closed") || strings.Contains(err.Error(), "database disk image is malformed") {
			_ = this.Close()
			this.deleteDB()
			remotelogs.Println("LIST_FILE_DB", "recreating the database (file:"+this.dbPath+") ...")
			err = this.Open(this.dbPath)
			if err != nil {
				remotelogs.Error("LIST_FILE_DB", "recreate the database failed: "+err.Error()+" (file:"+this.dbPath+")")
			} else {
				_ = this.Init()
			}
		}
		return
	}
	this.hashMapIsLoaded = true
}

View File

@@ -0,0 +1,170 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
"runtime"
"runtime/debug"
"testing"
"time"
)
// TestFileListDB_ListLFUItems loads a large on-disk database and prints
// a batch of LFU eviction candidates. Runs only in single-test mode.
func TestFileListDB_ListLFUItems(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()

	if err := db.Open(Tea.Root + "/data/cache-db-large.db"); err != nil {
		t.Fatal(err)
	}
	if err := db.Init(); err != nil {
		t.Fatal(err)
	}

	hashList, err := db.ListLFUItems(100)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("[", len(hashList), "]", hashList)
}
// TestFileListDB_CleanMatchKey exercises wildcard key cleaning against a
// large on-disk database. Runs only in single-test mode.
func TestFileListDB_CleanMatchKey(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()

	if err := db.Open(Tea.Root + "/data/cache-db-large.db"); err != nil {
		t.Fatal(err)
	}
	if err := db.Init(); err != nil {
		t.Fatal(err)
	}

	for _, key := range []string{
		"https://*.goedge.cn/large-text",
		"https://*.goedge.cn:1234/large-text?%2B____",
	} {
		if err := db.CleanMatchKey(key); err != nil {
			t.Fatal(err)
		}
	}
}
// TestFileListDB_CleanMatchPrefix exercises wildcard prefix cleaning
// against a large on-disk database. Runs only in single-test mode.
func TestFileListDB_CleanMatchPrefix(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()

	if err := db.Open(Tea.Root + "/data/cache-db-large.db"); err != nil {
		t.Fatal(err)
	}
	if err := db.Init(); err != nil {
		t.Fatal(err)
	}

	for _, prefix := range []string{
		"https://*.goedge.cn/large-text",
		"https://*.goedge.cn:1234/large-text?%2B____",
	} {
		if err := db.CleanMatchPrefix(prefix); err != nil {
			t.Fatal(err)
		}
	}
}
// TestFileListDB_Memory pages through all hashes and then runs repeated
// LFU queries to observe memory behavior; it sleeps at the end so the
// heap can be inspected externally. Runs only in single-test mode.
func TestFileListDB_Memory(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()
	err := db.Open(Tea.Root + "/data/cache-index/p1/db-0.db")
	if err != nil {
		t.Fatal(err)
	}
	err = db.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(db.Total())

	// load hashes page by page, following the max id cursor
	var maxId int64
	var hashList []string
	var before = time.Now()
	for i := 0; i < 1_000; i++ {
		hashList, maxId, err = db.ListHashes(maxId)
		if err != nil {
			t.Fatal(err)
		}
		// an empty page means the whole table has been read
		if len(hashList) == 0 {
			t.Log("hashes loaded", time.Since(before).Seconds()*1000, "ms")
			break
		}
		if i%100 == 0 {
			t.Log(i)
		}
	}

	runtime.GC()
	debug.FreeOSMemory()

	//time.Sleep(600 * time.Second)

	// hammer the LFU query to watch allocations
	for i := 0; i < 1_000; i++ {
		_, err = db.ListLFUItems(5000)
		if err != nil {
			t.Fatal(err)
		}
		if i%100 == 0 {
			t.Log(i)
		}
	}

	t.Log("loaded")

	runtime.GC()
	debug.FreeOSMemory()
	// keep the process alive for external heap inspection
	time.Sleep(600 * time.Second)
}

View File

@@ -0,0 +1,183 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"math/big"
"sync"
)
// HashMapSharding is the number of shards the hash map is split into;
// each shard has its own lock to reduce contention.
const HashMapSharding = 31

// bigIntPool recycles big.Int instances used to parse hex hashes,
// avoiding an allocation per lookup.
var bigIntPool = sync.Pool{
	New: func() any {
		return big.NewInt(0)
	},
}
// SQLiteFileListHashMap is a sharded in-memory set of cache item hashes,
// used to answer existence checks without querying SQLite.
type SQLiteFileListHashMap struct {
	m       []map[uint64]zero.Zero // one hash set per shard
	lockers []*sync.RWMutex        // one lock per shard, same index as m

	isAvailable bool // false when the map is disabled (e.g. low system memory)
	isReady     bool // true once all hashes have been loaded from the database
}
// NewSQLiteFileListHashMap creates an empty sharded hash map.
// It starts both unavailable and not ready; Load() flips these flags.
func NewSQLiteFileListHashMap() *SQLiteFileListHashMap {
	var hashMap = &SQLiteFileListHashMap{
		m:           make([]map[uint64]zero.Zero, HashMapSharding),
		lockers:     make([]*sync.RWMutex, HashMapSharding),
		isAvailable: false,
		isReady:     false,
	}
	for i := range hashMap.m {
		hashMap.m[i] = map[uint64]zero.Zero{}
		hashMap.lockers[i] = &sync.RWMutex{}
	}
	return hashMap
}
// Load fills the map with every hash currently stored in 'db'.
// On systems with less than 3 GB of memory the map stays disabled
// (Exist() will then always report true). Loading is capped at 50k
// batches as a safety bound against a runaway cursor.
func (this *SQLiteFileListHashMap) Load(db *SQLiteFileListDB) error {
	// skip caching entirely on low-memory systems
	if memutils.SystemMemoryGB() < 3 {
		return nil
	}
	this.isAvailable = true

	var lastId int64
	var maxLoops = 50_000
	for {
		hashList, maxId, err := db.ListHashes(lastId)
		if err != nil {
			return err
		}
		// an empty batch means the whole table has been read
		if len(hashList) == 0 {
			break
		}
		this.AddHashes(hashList)
		lastId = maxId

		maxLoops--
		if maxLoops <= 0 {
			break
		}
	}

	this.isReady = true
	return nil
}
// Add registers a hash in its shard; it is a no-op when the map is disabled.
func (this *SQLiteFileListHashMap) Add(hash string) {
	if !this.isAvailable {
		return
	}
	var hashInt, index = this.bigInt(hash)
	var locker = this.lockers[index]
	locker.Lock()
	this.m[index][hashInt] = zero.New()
	locker.Unlock()
}
// AddHashes registers a batch of hashes; it is a no-op when the map is disabled.
func (this *SQLiteFileListHashMap) AddHashes(hashes []string) {
	if !this.isAvailable {
		return
	}
	for _, hash := range hashes {
		var hashInt, index = this.bigInt(hash)
		var locker = this.lockers[index]
		locker.Lock()
		this.m[index][hashInt] = zero.New()
		locker.Unlock()
	}
}
// Delete removes a hash from its shard; it is a no-op when the map is disabled.
func (this *SQLiteFileListHashMap) Delete(hash string) {
	if !this.isAvailable {
		return
	}
	var hashInt, index = this.bigInt(hash)
	var locker = this.lockers[index]
	locker.Lock()
	delete(this.m[index], hashInt)
	locker.Unlock()
}
// Exist reports whether a hash is present. It returns true when the map
// is disabled or still loading, because absence can only be trusted once
// the full hash list has been loaded.
func (this *SQLiteFileListHashMap) Exist(hash string) bool {
	if !this.isAvailable || !this.isReady {
		return true
	}
	var hashInt, index = this.bigInt(hash)
	var locker = this.lockers[index]
	locker.RLock()
	_, found := this.m[index][hashInt]
	locker.RUnlock()
	return found
}
// Clean empties all shards. Fresh maps are assigned under the shard
// locks instead of clearing in place, so concurrent writers never see
// a map in a half-cleared state.
func (this *SQLiteFileListHashMap) Clean() {
	// take every shard lock before swapping any map
	for _, locker := range this.lockers {
		locker.Lock()
	}
	for i := range this.m {
		this.m[i] = map[uint64]zero.Zero{}
	}
	for i := len(this.lockers) - 1; i >= 0; i-- {
		this.lockers[i].Unlock()
	}
}
// IsReady reports whether the full hash list has been loaded.
func (this *SQLiteFileListHashMap) IsReady() bool {
	return this.isReady
}
// Len returns the total number of hashes across all shards.
//
// Read locks suffice here: Len only reads the shard maps (the original
// took exclusive write locks, blocking concurrent readers needlessly).
func (this *SQLiteFileListHashMap) Len() int {
	for i := 0; i < HashMapSharding; i++ {
		this.lockers[i].RLock()
	}
	var count = 0
	for _, shard := range this.m {
		count += len(shard)
	}
	for i := HashMapSharding - 1; i >= 0; i-- {
		this.lockers[i].RUnlock()
	}
	return count
}
// SetIsAvailable toggles whether the map accepts writes at all.
func (this *SQLiteFileListHashMap) SetIsAvailable(isAvailable bool) {
	this.isAvailable = isAvailable
}
// SetIsReady marks the map as fully loaded, enabling negative Exist() answers.
func (this *SQLiteFileListHashMap) SetIsReady(isReady bool) {
	this.isReady = isReady
}
// bigInt parses a hex hash into its low 64 bits and derives the shard
// index from that value. A pooled big.Int avoids per-call allocations.
func (this *SQLiteFileListHashMap) bigInt(hash string) (hashInt uint64, index int) {
	var value = bigIntPool.Get().(*big.Int)
	value.SetString(hash, 16)
	hashInt = value.Uint64()
	bigIntPool.Put(value)
	return hashInt, int(hashInt % HashMapSharding)
}

View File

@@ -0,0 +1,169 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/assert"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/big"
"runtime"
"strconv"
"testing"
"time"
)
// TestFileListHashMap_Memory reports how much heap a million MD5 hashes
// occupy inside the sharded hash map.
func TestFileListHashMap_Memory(t *testing.T) {
	var statBefore = &runtime.MemStats{}
	runtime.ReadMemStats(statBefore)

	var hashMap = caches.NewSQLiteFileListHashMap()
	hashMap.SetIsAvailable(true)
	for i := 0; i < 1_000_000; i++ {
		hashMap.Add(stringutil.Md5(types.String(i)))
	}
	t.Log("added:", hashMap.Len(), "hashes")

	var statAfter = &runtime.MemStats{}
	runtime.ReadMemStats(statAfter)
	t.Log("ready", (statAfter.Alloc-statBefore.Alloc)/1024/1024, "M")
	t.Log("remains:", hashMap.Len(), "hashes")
}
// TestFileListHashMap_Memory2 measures the footprint of a plain map with
// a million uint64 keys, for comparison with the sharded hash map.
func TestFileListHashMap_Memory2(t *testing.T) {
	var statBefore = &runtime.MemStats{}
	runtime.ReadMemStats(statBefore)

	var set = map[uint64]zero.Zero{}
	for i := 0; i < 1_000_000; i++ {
		set[uint64(i)] = zero.New()
	}

	var statAfter = &runtime.MemStats{}
	runtime.ReadMemStats(statAfter)
	t.Log("ready", (statAfter.Alloc-statBefore.Alloc)/1024/1024, "M")
}
// TestFileListHashMap_BigInt verifies that reusing one big.Int via
// SetString yields the same low-64-bit value as a freshly allocated
// big.Int for MD5 hashes.
func TestFileListHashMap_BigInt(t *testing.T) {
	var sharedInt = big.NewInt(0)

	for _, s := range []string{"1", "2", "3", "123", "123456"} {
		var hash = stringutil.Md5(s)
		var freshInt = big.NewInt(0)
		freshInt.SetString(hash, 16)
		sharedInt.SetString(hash, 16)
		t.Log(s, "=>", freshInt.Uint64(), "hash:", hash, "format:", strconv.FormatUint(freshInt.Uint64(), 16), strconv.FormatUint(sharedInt.Uint64(), 16))
		if strconv.FormatUint(freshInt.Uint64(), 16) != strconv.FormatUint(sharedInt.Uint64(), 16) {
			t.Fatal("not equal")
		}
	}

	for i := 0; i < 1_000_000; i++ {
		var hash = stringutil.Md5(types.String(i))
		var freshInt = big.NewInt(0)
		freshInt.SetString(hash, 16)
		sharedInt.SetString(hash, 16)
		if freshInt.Uint64() != sharedInt.Uint64() {
			t.Fatal(i, "not equal")
		}
	}
}
// TestFileListHashMap_Load loads hashes from an on-disk file list and
// spot-checks Exist(). Runs only in single-test mode.
func TestFileListHashMap_Load(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
	defer func() {
		_ = list.Close()
	}()
	if err := list.Init(); err != nil {
		t.Fatal(err)
	}

	var hashMap = caches.NewSQLiteFileListHashMap()
	var before = time.Now()
	if err := hashMap.Load(list.GetDB("abc")); err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log("count:", hashMap.Len())

	hashMap.Add("abc")
	for _, hash := range []string{"33347bb4441265405347816cad36a0f8", "a", "abc", "123"} {
		t.Log(hash, "=>", hashMap.Exist(hash))
	}
}
// TestFileListHashMap_Delete checks Add/Delete bookkeeping via Len().
func TestFileListHashMap_Delete(t *testing.T) {
	var a = assert.NewAssertion(t)

	var hashMap = caches.NewSQLiteFileListHashMap()
	hashMap.SetIsReady(true)
	hashMap.SetIsAvailable(true)

	hashMap.Add("a")
	a.IsTrue(hashMap.Len() == 1)

	hashMap.Delete("a")
	a.IsTrue(hashMap.Len() == 0)
}
// TestFileListHashMap_Clean ensures the map stays writable after Clean().
func TestFileListHashMap_Clean(t *testing.T) {
	var hashMap = caches.NewSQLiteFileListHashMap()
	hashMap.SetIsAvailable(true)
	hashMap.Clean()
	hashMap.Add("a")
}
// Benchmark_BigInt measures hex-hash parsing with a fresh big.Int per iteration.
func Benchmark_BigInt(b *testing.B) {
	var hash = stringutil.Md5("123456")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var value = big.NewInt(0)
		value.SetString(hash, 16)
		_ = value.Uint64()
	}
}
// BenchmarkFileListHashMap_Exist measures concurrent Add/Exist throughput
// on a map pre-filled with a million entries.
func BenchmarkFileListHashMap_Exist(b *testing.B) {
	var hashMap = caches.NewSQLiteFileListHashMap()
	hashMap.SetIsAvailable(true)
	hashMap.SetIsReady(true)
	for i := 0; i < 1_000_000; i++ {
		hashMap.Add(types.String(i))
	}

	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			hashMap.Add(types.String(rands.Int64()))
			_ = hashMap.Exist(types.String(rands.Int64()))
		}
	})
}

View File

@@ -0,0 +1,350 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/iwind/TeaGo/types"
"strings"
"testing"
)
const countKVStores = 10
// KVFileList tracks cache items across countKVStores KV store shards,
// with a shared TTL memory cache of hash -> item size for fast lookups.
type KVFileList struct {
	dir    string                       // base directory holding the store files
	stores [countKVStores]*KVListFileStore // shard selected by FNV hash of the item hash

	onAdd    func(item *Item) // optional callback fired after Add
	onRemove func(item *Item) // optional callback fired after Remove (called with nil)

	memCache *ttlcache.Cache[int64] // hash -> headerSize+bodySize, shared with all stores
}
// NewKVFileList creates a KV-backed file list sharded across
// countKVStores stores under 'dir', sharing one memory cache whose
// capacity scales with system memory.
func NewKVFileList(dir string) *KVFileList {
	var memGB = memutils.SystemMemoryGB()
	if memGB <= 0 {
		memGB = 1
	}
	var maxCachePieces = 32
	var maxCacheItems = memGB << 15 // 32768 cache entries per GB of system memory
	var memCache = ttlcache.NewCache[int64](ttlcache.NewPiecesOption(maxCachePieces), ttlcache.NewMaxItemsOption(maxCacheItems))

	dir = strings.TrimSuffix(dir, "/")

	var stores = [countKVStores]*KVListFileStore{}
	for i := 0; i < countKVStores; i++ {
		stores[i] = NewKVListFileStore(dir+"/db-"+types.String(i)+".store", memCache)
	}
	return &KVFileList{
		dir:      dir,
		stores:   stores,
		memCache: memCache,
	}
}
// Init opens all underlying stores concurrently and returns the last
// open error observed, if any.
//
// lastErr is written under the group's lock because the openers run in
// parallel goroutines (the original assigned it without synchronization,
// a data race; sibling methods Stat/Count already use group.Lock).
func (this *KVFileList) Init() error {
	remotelogs.Println("CACHE", "loading database from '"+this.dir+"' ...")

	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.Open()
			if err != nil {
				group.Lock()
				lastErr = fmt.Errorf("open store '"+storeCopy.Path()+"' failed: %w", err)
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// Reset drops all cached sizes from the shared memory cache.
func (this *KVFileList) Reset() error {
	this.memCache.Clean()
	return nil
}
// Add writes an item into the store selected by its hash, fires the
// onAdd callback, and primes the memory cache for non-permanent items.
func (this *KVFileList) Add(hash string, item *Item) error {
	if err := this.getStore(hash).AddItem(hash, item); err != nil {
		return err
	}
	if this.onAdd != nil {
		this.onAdd(item)
	}
	if item.ExpiresAt > 0 {
		// cache entries live at most one hour even when the item expires later
		var cacheExpiresAt = min(item.ExpiresAt, fasttime.Now().Unix()+3600)
		this.memCache.Write(hash, item.HeaderSize+item.BodySize, cacheExpiresAt)
	}
	return nil
}
// Exist reports whether an item exists and returns its size
// (header + body), consulting the memory cache first.
func (this *KVFileList) Exist(hash string) (bool, int64, error) {
	if cacheItem := this.memCache.Read(hash); cacheItem != nil {
		return true, cacheItem.Value, nil
	}
	return this.getStore(hash).ExistItem(hash)
}
// ExistQuick reports existence only, skipping size retrieval; the
// memory cache is consulted first.
func (this *KVFileList) ExistQuick(hash string) (bool, error) {
	if this.memCache.Read(hash) != nil {
		return true, nil
	}
	return this.getStore(hash).ExistQuickItem(hash)
}
// CleanPrefix expires every item whose key starts with 'prefix',
// fanning the work out to all stores in parallel and returning the
// last error observed.
//
// lastErr is written under the group's lock because the workers run
// concurrently (the original assignment was a data race).
func (this *KVFileList) CleanPrefix(prefix string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithPrefix(prefix)
			if err != nil {
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// CleanMatchKey expires every item matching the wildcard key, fanning
// the work out to all stores in parallel and returning the last error
// observed.
//
// lastErr is written under the group's lock because the workers run
// concurrently (the original assignment was a data race).
func (this *KVFileList) CleanMatchKey(key string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithWildcardKey(key)
			if err != nil {
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// CleanMatchPrefix expires every item matching the wildcard prefix,
// fanning the work out to all stores in parallel and returning the
// last error observed.
//
// lastErr is written under the group's lock because the workers run
// concurrently (the original assignment was a data race).
func (this *KVFileList) CleanMatchPrefix(prefix string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithWildcardPrefix(prefix)
			if err != nil {
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// Remove deletes an item from its store, fires the onRemove callback,
// and evicts the hash from the memory cache.
func (this *KVFileList) Remove(hash string) error {
	if err := this.getStore(hash).RemoveItem(hash); err != nil {
		return err
	}
	if this.onRemove != nil {
		// no extra information is available when removing a file item
		this.onRemove(nil)
	}
	this.memCache.Delete(hash)
	return nil
}
// Purge removes stale items from every store sequentially, invoking
// 'callback' per removed hash. The 'count' budget is split evenly among
// stores (minimum 100 each); the number of stale items found is returned
// together with the last error observed.
func (this *KVFileList) Purge(count int, callback func(hash string) error) (int, error) {
	count /= countKVStores
	if count <= 0 {
		count = 100
	}

	var countFound = 0
	var lastErr error
	for _, store := range this.stores {
		purgeCount, err := store.PurgeItems(count, callback)
		countFound += purgeCount
		if err != nil {
			lastErr = err
		}
	}
	return countFound, lastErr
}
// PurgeLFU evicts roughly 'count' of the oldest items, splitting the
// budget evenly among stores (minimum 100 each) and invoking 'callback'
// per removed hash; the last error observed is returned.
func (this *KVFileList) PurgeLFU(count int, callback func(hash string) error) error {
	count /= countKVStores
	if count <= 0 {
		count = 100
	}

	var lastErr error
	for _, store := range this.stores {
		if err := store.PurgeLFUItems(count, callback); err != nil {
			lastErr = err
		}
	}
	return lastErr
}
// CleanAll truncates every store in parallel, then clears the memory
// cache; the last error observed is returned.
//
// lastErr is written under the group's lock because the workers run
// concurrently (the original assignment was a data race).
func (this *KVFileList) CleanAll() error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.RemoveAllItems()
			if err != nil {
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()

	this.memCache.Clean()
	return lastErr
}
// Stat aggregates size and count statistics from all stores in parallel.
//
// Both the shared stat fields and lastErr are updated under the group's
// lock (the original guarded the stat fields but wrote lastErr without
// synchronization, a data race).
func (this *KVFileList) Stat(check func(hash string) bool) (*Stat, error) {
	var stat = &Stat{}
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			storeStat, err := storeCopy.StatItems()
			group.Lock()
			defer group.Unlock()
			if err != nil {
				lastErr = err
				return
			}
			stat.Size += storeStat.Size
			stat.ValueSize += storeStat.ValueSize
			stat.Count += storeStat.Count
		})
	}
	group.Wait()
	return stat, lastErr
}
// Count returns the total number of items across all stores, counted
// in parallel.
//
// Both the shared accumulator and lastErr are updated under the group's
// lock (the original guarded the accumulator but wrote lastErr without
// synchronization, a data race).
func (this *KVFileList) Count() (int64, error) {
	var count int64
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			countStoreItems, err := storeCopy.CountItems()
			group.Lock()
			defer group.Unlock()
			if err != nil {
				lastErr = err
				return
			}
			count += countStoreItems
		})
	}
	group.Wait()
	return count, lastErr
}
// OnAdd registers the callback invoked after an item has been added.
func (this *KVFileList) OnAdd(fn func(item *Item)) {
	this.onAdd = fn
}
// OnRemove registers the callback invoked after an item has been removed.
func (this *KVFileList) OnRemove(fn func(item *Item)) {
	this.onRemove = fn
}
// Close shuts down every store in parallel and destroys the memory
// cache; the last close error observed is returned.
//
// lastErr is written under the group's lock because the workers run
// concurrently (the original assignment was a data race).
func (this *KVFileList) Close() error {
	var lastErr error
	var group = goman.NewTaskGroup()
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.Close()
			if err != nil {
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()

	this.memCache.Destroy()
	return lastErr
}
// IncreaseHit is a no-op: the KV backend does not track hit counters.
func (this *KVFileList) IncreaseHit(hash string) error {
	return nil
}
// TestInspect dumps each store's contents; intended for tests only.
func (this *KVFileList) TestInspect(t *testing.T) error {
	for _, store := range this.stores {
		if err := store.TestInspect(t); err != nil {
			return err
		}
	}
	return nil
}
// getStore maps a hash to its shard via an FNV hash of the string.
func (this *KVFileList) getStore(hash string) *KVListFileStore {
	var index = fnv.HashString(hash) % countKVStores
	return this.stores[index]
}

View File

@@ -0,0 +1,66 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"encoding/binary"
"encoding/json"
"strings"
)
// ItemKVEncoder serializes cache items (and their indexed fields) for
// storage in the KV store. Whole items are stored as JSON; index fields
// use byte-comparable encodings (see EncodeField).
type ItemKVEncoder[T interface{ *Item }] struct {
}
// NewItemKVEncoder creates an item encoder.
func NewItemKVEncoder[T interface{ *Item }]() *ItemKVEncoder[T] {
	return &ItemKVEncoder[T]{}
}
// Encode serializes a whole item as JSON.
func (this *ItemKVEncoder[T]) Encode(value T) ([]byte, error) {
	return json.Marshal(value)
}
// EncodeField encodes a single indexed field of an item into the byte
// form used for secondary-index keys; unknown field names yield nil.
//
// Numeric fields are stored as big-endian uint32 so byte-wise key
// comparison matches numeric order. Negative values are clamped to zero
// before the conversion — the original clamped staleAt and serverId but
// not createdAt, which would have produced a huge index value for a
// pathological negative timestamp.
// NOTE(review): uint32 truncates timestamps beyond 2106 — presumably
// acceptable for cache lifetimes; confirm before reusing elsewhere.
func (this *ItemKVEncoder[T]) EncodeField(value T, fieldName string) ([]byte, error) {
	var item = any(value).(*Item)
	switch fieldName {
	case "createdAt", "staleAt", "serverId":
		var n int64
		switch fieldName {
		case "createdAt":
			n = item.CreatedAt
		case "staleAt":
			n = item.StaleAt
		case "serverId":
			n = item.ServerId
		}
		if n < 0 {
			n = 0
		}
		var b = make([]byte, 4)
		binary.BigEndian.PutUint32(b, uint32(n))
		return b, nil
	case "key":
		return []byte(item.Key), nil
	case "wildKey":
		// replace the first host label with '*' so wildcard lookups can match
		// by prefix: "https://www.example.com/x" -> "https://*.example.com/x"
		var key = item.Key
		var dotIndex = strings.Index(key, ".")
		if dotIndex > 0 {
			var slashIndex = strings.LastIndex(key[:dotIndex], "/")
			if slashIndex > 0 {
				key = key[:slashIndex+1] + "*" + key[dotIndex:]
			}
		}
		return []byte(key), nil
	}
	return nil, nil
}
// Decode deserializes an item previously produced by Encode.
func (this *ItemKVEncoder[T]) Decode(valueBytes []byte) (value T, err error) {
	err = json.Unmarshal(valueBytes, &value)
	return value, err
}

View File

@@ -0,0 +1,69 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestItemKVEncoder_EncodeField checks the "key" passthrough and the
// wildcard-host rewriting of "wildKey" with a table of cases.
func TestItemKVEncoder_EncodeField(t *testing.T) {
	var a = assert.NewAssertion(t)
	var encoder = caches.NewItemKVEncoder[*caches.Item]()

	for _, testCase := range []struct {
		field  string
		key    string
		result string
	}{
		{"key", "https://example.com/index.html", "https://example.com/index.html"},
		{"wildKey", "", ""},
		{"wildKey", "example.com/index.html", "example.com/index.html"},
		{"wildKey", "https://example.com/index.html", "https://*.com/index.html"},
		{"wildKey", "https://www.example.com/index.html", "https://*.example.com/index.html"},
	} {
		encodedKey, err := encoder.EncodeField(&caches.Item{
			Key: testCase.key,
		}, testCase.field)
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(encodedKey))
		a.IsTrue(string(encodedKey) == testCase.result)
	}
}

View File

@@ -0,0 +1,503 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"errors"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/kvstore"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/cockroachdb/pebble"
"regexp"
"strings"
"testing"
)
// KVListFileStore is one shard of the KV-backed file list, keeping cache
// items in a single "items" table of the underlying KV store.
type KVListFileStore struct {
	path     string         // store file path: "<dir>/<name>.store"
	rawStore *kvstore.Store // underlying KV store, set by Open()

	// tables
	itemsTable *kvstore.Table[*Item] // hash -> item, with indexed fields for purge/clean

	rawIsReady bool // true once Open() has completed successfully

	memCache *ttlcache.Cache[int64] // shared hash -> size cache, owned by the parent list
}
// NewKVListFileStore creates a store shard for the given path sharing
// the parent's memory cache; call Open() before use.
func NewKVListFileStore(path string, memCache *ttlcache.Cache[int64]) *KVListFileStore {
	return &KVListFileStore{
		path:     path,
		memCache: memCache,
	}
}
// Open parses the store path ("<dir>/<name>.store"), opens the raw KV
// store and creates the "cache" database with the items table and its
// secondary index fields.
func (this *KVListFileStore) Open() error {
	// split the path into directory and base name without the ".store" suffix
	var reg = regexp.MustCompile(`^(.+)/([\w-]+)(\.store)$`)
	var matches = reg.FindStringSubmatch(this.path)
	if len(matches) != 4 {
		return errors.New("invalid path '" + this.path + "'")
	}
	var dir = matches[1]
	var name = matches[2]

	rawStore, err := kvstore.OpenStoreDir(dir, name)
	if err != nil {
		return err
	}
	this.rawStore = rawStore

	db, err := rawStore.NewDB("cache")
	if err != nil {
		return err
	}

	{
		table, tableErr := kvstore.NewTable[*Item]("items", NewItemKVEncoder[*Item]())
		if tableErr != nil {
			return tableErr
		}
		// indexed fields used by purge (staleAt, createdAt) and clean (key, wildKey)
		err = table.AddFields("staleAt", "key", "wildKey", "createdAt")
		if err != nil {
			return err
		}
		db.AddTable(table)
		this.itemsTable = table
	}

	this.rawIsReady = true
	return nil
}
// Path returns the store file path.
func (this *KVListFileStore) Path() string {
	return this.path
}
// AddItem stores an item under its hash, filling in default creation
// and stale times. Already-expired items are silently skipped.
func (this *KVListFileStore) AddItem(hash string, item *Item) error {
	if !this.isReady() {
		return nil
	}

	var currentTime = fasttime.Now().Unix()
	if item.ExpiresAt <= currentTime {
		// already expired: nothing worth storing
		return nil
	}
	if item.CreatedAt <= 0 {
		item.CreatedAt = currentTime
	}
	if item.StaleAt <= 0 {
		item.StaleAt = item.ExpiresAt + DefaultStaleCacheSeconds
	}
	return this.itemsTable.Set(hash, item)
}
// ExistItem reports whether a non-expired item exists for the hash and
// returns its size (header + body); a hit also primes the memory cache.
func (this *KVListFileStore) ExistItem(hash string) (bool, int64, error) {
	if !this.isReady() {
		return false, -1, nil
	}

	item, err := this.itemsTable.Get(hash)
	if err != nil {
		if kvstore.IsNotFound(err) {
			return false, -1, nil
		}
		return false, -1, err
	}
	if item == nil {
		return false, -1, nil
	}
	if item.ExpiresAt <= fasttime.Now().Unix() {
		return false, 0, nil
	}

	// cache entries live at most one hour even when the item expires later
	var size = item.HeaderSize + item.BodySize
	this.memCache.Write(hash, size, min(item.ExpiresAt, fasttime.Now().Unix()+3600))
	return true, size, nil
}
// ExistQuickItem reports existence only, without decoding the item
// or checking its expiration.
func (this *KVListFileStore) ExistQuickItem(hash string) (bool, error) {
	if !this.isReady() {
		return false, nil
	}
	return this.itemsTable.Exist(hash)
}
// RemoveItem deletes the item stored under the hash.
func (this *KVListFileStore) RemoveItem(hash string) error {
	if !this.isReady() {
		return nil
	}
	return this.itemsTable.Delete(hash)
}
// RemoveAllItems truncates the whole items table.
func (this *KVListFileStore) RemoveAllItems() error {
	if !this.isReady() {
		return nil
	}
	return this.itemsTable.Truncate()
}
// PurgeItems scans up to 'count' items in ascending staleAt order,
// deletes those whose stale time has passed in one transaction, and
// invokes 'callback' for each deleted hash after the commit. It returns
// the number of stale items found.
func (this *KVListFileStore) PurgeItems(count int, callback func(hash string) error) (int, error) {
	if !this.isReady() {
		return 0, nil
	}

	var countFound int
	var currentTime = fasttime.Now().Unix()
	var hashList []string

	// collect stale hashes; the staleAt index is ascending, so the first
	// non-stale item means the rest cannot be stale either
	err := this.itemsTable.
		Query().
		FieldAsc("staleAt").
		Limit(count).
		FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
			if item.Value == nil {
				return true, nil
			}
			if item.Value.StaleAt < currentTime {
				countFound++
				hashList = append(hashList, item.Key)
				return true, nil
			}
			return false, nil
		})
	if err != nil {
		return 0, err
	}

	// delete items
	if len(hashList) > 0 {
		txErr := this.itemsTable.WriteTx(func(tx *kvstore.Tx[*Item]) error {
			for _, hash := range hashList {
				deleteErr := tx.Delete(hash)
				if deleteErr != nil {
					return deleteErr
				}
				this.memCache.Delete(hash)
			}
			return nil
		})
		if txErr != nil {
			return 0, txErr
		}

		// notify the caller only after the transaction has committed
		for _, hash := range hashList {
			callbackErr := callback(hash)
			if callbackErr != nil {
				return 0, callbackErr
			}
		}
	}
	return countFound, nil
}
// PurgeLFUItems deletes up to 'count' items in ascending createdAt
// order (oldest first) in one transaction, invoking 'callback' for each
// deleted hash after the commit.
func (this *KVListFileStore) PurgeLFUItems(count int, callback func(hash string) error) error {
	if !this.isReady() {
		return nil
	}

	// collect the oldest hashes via the createdAt index
	var hashList []string
	err := this.itemsTable.
		Query().
		FieldAsc("createdAt").
		Limit(count).
		FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
			if item.Value != nil {
				hashList = append(hashList, item.Key)
			}
			return true, nil
		})
	if err != nil {
		return err
	}

	// delete items
	if len(hashList) > 0 {
		txErr := this.itemsTable.WriteTx(func(tx *kvstore.Tx[*Item]) error {
			for _, hash := range hashList {
				deleteErr := tx.Delete(hash)
				if deleteErr != nil {
					return deleteErr
				}
				this.memCache.Delete(hash)
			}
			return nil
		})
		if txErr != nil {
			return txErr
		}

		// notify the caller only after the transaction has committed
		for _, hash := range hashList {
			callbackErr := callback(hash)
			if callbackErr != nil {
				return callbackErr
			}
		}
	}
	return nil
}
// CleanItemsWithPrefix expires every item whose key starts with
// 'prefix'. Rows are walked in batches of 1000 through the "key" field
// index, resuming each batch from the last seen field key; matched rows
// get expiredAt/staleAt zeroed instead of being deleted outright.
func (this *KVListFileStore) CleanItemsWithPrefix(prefix string) error {
	if !this.isReady() {
		return nil
	}
	if len(prefix) == 0 {
		return nil
	}

	var currentTime = fasttime.Now().Unix()
	var fieldOffset []byte
	const size = 1000
	for {
		var count int
		err := this.itemsTable.
			Query().
			FieldPrefix("key", prefix).
			FieldOffset(fieldOffset).
			Limit(size).
			ForUpdate().
			FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
				if item.Value == nil {
					return true, nil
				}
				count++
				fieldOffset = item.FieldKey

				// skip rows created after this call started, and rows already expired
				if item.Value.CreatedAt >= currentTime {
					return true, nil
				}
				if item.Value.ExpiresAt == 0 {
					return true, nil
				}

				item.Value.ExpiresAt = 0
				item.Value.StaleAt = 0
				setErr := tx.Set(item.Key, item.Value) // TODO improve performance
				if setErr != nil {
					return false, setErr
				}

				// remove from cache
				this.memCache.Delete(item.Key)
				return true, nil
			})
		if err != nil {
			return err
		}
		// a short batch means the index range has been exhausted
		if count < size {
			break
		}
	}
	return nil
}
// CleanItemsWithWildcardPrefix expires every item whose "wildKey" field starts
// with the given prefix (used for wildcard host matching such as
// https://*.example.com/prefix/). Items are not deleted: ExpiresAt/StaleAt are
// zeroed so the purge machinery reclaims them later.
// Items created at or after the moment this call started are skipped.
// Matches are processed in pages of 1000 using a field-offset cursor.
func (this *KVListFileStore) CleanItemsWithWildcardPrefix(prefix string) error {
	if !this.isReady() {
		return nil
	}

	if len(prefix) == 0 {
		return nil
	}

	var currentTime = fasttime.Now().Unix()

	var fieldOffset []byte
	const size = 1000
	for {
		var count int
		err := this.itemsTable.
			Query().
			FieldPrefix("wildKey", prefix).
			FieldOffset(fieldOffset).
			Limit(size).
			ForUpdate().
			FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
				if item.Value == nil {
					return true, nil
				}

				count++
				// remember the cursor position for the next page
				fieldOffset = item.FieldKey

				// skip items created after this cleanup started
				if item.Value.CreatedAt >= currentTime {
					return true, nil
				}

				// already expired, nothing to do
				if item.Value.ExpiresAt == 0 {
					return true, nil
				}

				item.Value.ExpiresAt = 0
				item.Value.StaleAt = 0
				setErr := tx.Set(item.Key, item.Value) // TODO improve performance
				if setErr != nil {
					return false, setErr
				}

				// remove from cache
				this.memCache.Delete(item.Key)

				return true, nil
			})
		if err != nil {
			return err
		}
		// a short page means we reached the end of the matching range
		if count < size {
			break
		}
	}

	return nil
}
// CleanItemsWithWildcardKey expires items matching a wildcard key
// (e.g. https://*.example.com/hello). Both the exact key and the key with the
// SuffixAll marker appended are processed, covering plain entries and their
// variant entries. Items are not deleted: ExpiresAt/StaleAt are zeroed.
// Items created at or after the moment this call started are skipped.
func (this *KVListFileStore) CleanItemsWithWildcardKey(key string) error {
	if !this.isReady() {
		return nil
	}

	if len(key) == 0 {
		return nil
	}

	var currentTime = fasttime.Now().Unix()

	for _, realKey := range []string{key, key + SuffixAll} {
		// seed the cursor just past the field prefix for this key
		// NOTE(review): the '$' byte and the two trailing zero bytes below appear
		// to mirror the table's wildKey field encoding — confirm against the
		// Item field encoder before changing.
		var fieldOffset = append(this.itemsTable.FieldKey("wildKey"), '$')
		fieldOffset = append(fieldOffset, realKey...)
		const size = 1000

		var wildKey string
		if !strings.HasSuffix(realKey, SuffixAll) {
			wildKey = string(append([]byte(realKey), 0, 0))
		} else {
			wildKey = realKey
		}

		for {
			var count int
			err := this.itemsTable.
				Query().
				FieldPrefix("wildKey", wildKey).
				FieldOffset(fieldOffset).
				Limit(size).
				ForUpdate().
				FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
					if item.Value == nil {
						return true, nil
					}

					count++
					// remember the cursor position for the next page
					fieldOffset = item.FieldKey

					// skip items created after this cleanup started
					if item.Value.CreatedAt >= currentTime {
						return true, nil
					}

					// already expired, nothing to do
					if item.Value.ExpiresAt == 0 {
						return true, nil
					}

					item.Value.ExpiresAt = 0
					item.Value.StaleAt = 0
					setErr := tx.Set(item.Key, item.Value) // TODO improve performance
					if setErr != nil {
						return false, setErr
					}

					// remove from cache
					this.memCache.Delete(item.Key)

					return true, nil
				})
			if err != nil {
				return err
			}
			// a short page means we reached the end of the matching range
			if count < size {
				break
			}
		}
	}

	return nil
}
// CountItems returns the total number of items stored in the table.
// It reports zero without an error when the store is not ready.
func (this *KVListFileStore) CountItems() (int64, error) {
	if this.isReady() {
		return this.itemsTable.Count()
	}
	return 0, nil
}
// StatItems walks every item in the table and aggregates the item count,
// total size and total body size into a Stat.
// This is a full table scan, so it can be slow on large stores.
func (this *KVListFileStore) StatItems() (*Stat, error) {
	if !this.isReady() {
		return &Stat{}, nil
	}

	var stat = &Stat{}
	err := this.itemsTable.
		Query().
		FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
			if item.Value != nil {
				// Size presumably covers header+body+meta — confirm in Item.Size
				stat.Size += item.Value.Size()
				stat.ValueSize += item.Value.BodySize
				stat.Count++
			}
			return true, nil
		})
	return stat, err
}
// TestInspect dumps every raw key/value pair of the underlying pebble store
// to the test log. It is intended for debugging from tests only.
func (this *KVListFileStore) TestInspect(t *testing.T) error {
	if !this.isReady() {
		return nil
	}

	it, err := this.rawStore.RawDB().NewIter(&pebble.IterOptions{})
	if err != nil {
		return err
	}
	defer func() {
		_ = it.Close()
	}()

	for it.First(); it.Valid(); it.Next() {
		valueBytes, valueErr := it.ValueAndErr()
		if valueErr != nil {
			return valueErr
		}
		t.Log(string(it.Key()), "=>", string(valueBytes))
	}

	return nil
}
// Close releases the underlying raw store if one was opened.
func (this *KVListFileStore) Close() error {
	var store = this.rawStore
	if store == nil {
		return nil
	}
	return store.Close()
}
// isReady reports whether the raw store has been opened successfully and is
// not closed yet. Most public methods become harmless no-ops when this
// returns false.
func (this *KVListFileStore) isReady() bool {
	return this.rawIsReady && !this.rawStore.IsClosed()
}

View File

@@ -0,0 +1,432 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/rand"
"strconv"
"sync"
"testing"
"time"
)
var testingKVList *caches.KVFileList
func testOpenKVFileList(t *testing.T) *caches.KVFileList {
var list = caches.NewKVFileList(Tea.Root + "/data/stores/cache-stores")
err := list.Init()
if err != nil {
t.Fatal(err)
}
testingKVList = list
return list
}
func TestNewKVFileList(t *testing.T) {
var list = testOpenKVFileList(t)
err := list.Close()
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Add(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.Add(stringutil.Md5("123456"), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://example.com/index.html",
ExpiresAt: time.Now().Unix() + 60,
StaleAt: 0,
HeaderSize: 0,
BodySize: 4096,
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Add_Many(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
const start = 0
const count = 1_000_000
const concurrent = 100
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fs", costSeconds), "qps:", fmt.Sprintf("%.2fK/s", float64(count)/1000/costSeconds))
}()
var wg = &sync.WaitGroup{}
wg.Add(concurrent)
for c := 0; c < concurrent; c++ {
go func(c int) {
defer wg.Done()
var segmentStart = start + count/concurrent*c
for i := segmentStart; i < segmentStart+count/concurrent; i++ {
err := list.Add(stringutil.Md5(strconv.Itoa(i)), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://www.example.com/index.html" + strconv.Itoa(i),
ExpiresAt: time.Now().Unix() + 3600,
StaleAt: 0,
HeaderSize: 0,
BodySize: int64(rand.Int() % 1_000_000),
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Log(err)
}
}
}(c)
}
wg.Wait()
}
func TestKVFileList_Add_Many_Suffix(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
const start = 0
const count = 1000
const concurrent = 100
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fs", costSeconds), "qps:", fmt.Sprintf("%.2fK/s", float64(count)/1000/costSeconds))
}()
var wg = &sync.WaitGroup{}
wg.Add(concurrent)
for c := 0; c < concurrent; c++ {
go func(c int) {
defer wg.Done()
var segmentStart = start + count/concurrent*c
for i := segmentStart; i < segmentStart+count/concurrent; i++ {
err := list.Add(stringutil.Md5(strconv.Itoa(i)+caches.SuffixAll), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://www.example.com/index.html" + strconv.Itoa(i) + caches.SuffixAll + "zip",
ExpiresAt: time.Now().Unix() + 60,
StaleAt: 0,
HeaderSize: 0,
BodySize: int64(rand.Int() % 1_000_000),
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Log(err)
}
}
}(c)
}
wg.Wait()
}
func TestKVFileList_Exist(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
b, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "=>", b)
}
}
func TestKVFileList_ExistMany(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var countFound int
var count = 10
if testutils.IsSingleTesting() {
count = 2_000_000
}
var before = time.Now()
for i := 0; i < count; i++ {
ok, _, err := list.Exist(stringutil.Md5(strconv.Itoa(i)))
if err != nil {
t.Fatal(err)
}
if ok {
countFound++
}
}
var costSeconds = time.Since(before).Seconds()
t.Log("total:", costSeconds*1000, "ms", "found:", countFound, "qps:", fmt.Sprintf("%.2fK/s", float64(count)/costSeconds/1000), "per read:", fmt.Sprintf("%.4fms", costSeconds*1000/float64(count)))
}
func TestKVFileList_ExistQuick(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
b, err := list.ExistQuick(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "=>", b)
}
}
func TestKVFileList_Remove(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
err := list.Remove(hash)
if err != nil {
t.Fatal(err)
}
}
}
func TestKVFileList_RemoveMany(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var count = 10
if testutils.IsSingleTesting() {
count = 2_000_000
}
var before = time.Now()
for i := 0; i < count; i++ {
err := list.Remove(stringutil.Md5(strconv.Itoa(i)))
if err != nil {
t.Fatal(err)
}
}
var costSeconds = time.Since(before).Seconds()
t.Log("total:", costSeconds*1000, "ms", "qps:", fmt.Sprintf("%.2fK/s", float64(count)/costSeconds/1000), "per delete:", fmt.Sprintf("%.4fms", costSeconds*1000/float64(count)))
}
func TestKVFileList_CleanAll(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.CleanAll()
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Inspect(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.TestInspect(t)
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Purge(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
count, err := list.Purge(4_000, func(hash string) error {
//t.Log("hash:", hash)
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "count:", count)
}
func TestKVFileList_PurgeLFU(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
err := list.PurgeLFU(20000, func(hash string) error {
t.Log("hash:", hash)
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000))
}
func TestKVFileList_Count(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
count, err := list.Count()
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "count:", count)
}
func TestKVFileList_Stat(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
stat, err := list.Stat(func(hash string) bool {
return true
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "stat:", fmt.Sprintf("%+v", stat))
}
func TestKVFileList_CleanPrefix(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanPrefix("https://www.example.com/index.html")
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_CleanMatchPrefix(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanMatchPrefix("https://*.example.com/index.html")
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_CleanMatchKey(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanMatchKey("https://*.example.com/index.html123")
if err != nil {
t.Fatal(err)
}
}
func BenchmarkKVFileList_Exist(b *testing.B) {
var list = caches.NewKVFileList(Tea.Root + "/data/stores/cache-stores")
err := list.Init()
if err != nil {
b.Fatal(err)
}
defer func() {
_ = list.Close()
}()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, _, existErr := list.Exist(stringutil.Md5(strconv.Itoa(rand.Int() % 2_000_000)))
if existErr != nil {
b.Fatal(existErr)
}
}
})
}

View File

@@ -0,0 +1,585 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"database/sql"
"errors"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/types"
"os"
"strings"
"sync"
"time"
)
const CountFileDB = 20
// SQLiteFileList 文件缓存列表管理
type SQLiteFileList struct {
dir string
dbList [CountFileDB]*SQLiteFileListDB
onAdd func(item *Item)
onRemove func(item *Item)
memoryCache *ttlcache.Cache[zero.Zero]
// 老数据库地址
oldDir string
}
func NewSQLiteFileList(dir string) ListInterface {
return &SQLiteFileList{
dir: dir,
memoryCache: ttlcache.NewCache[zero.Zero](),
}
}
func (this *SQLiteFileList) SetOldDir(oldDir string) {
this.oldDir = oldDir
}
// Init creates the cache directory if needed and opens the CountFileDB shard
// databases concurrently. It returns the last open/init error, if any, and
// finally kicks off an asynchronous upgrade of the legacy database.
func (this *SQLiteFileList) Init() error {
	// ensure the cache directory exists
	_, err := os.Stat(this.dir)
	if err != nil {
		err = os.MkdirAll(this.dir, 0777)
		if err != nil {
			return err
		}
		remotelogs.Println("CACHE", "create cache dir '"+this.dir+"'")
	}

	var dir = this.dir
	if dir == "/" {
		// avoid the sqlite "authority" error for a bare root path
		dir = ""
	}

	remotelogs.Println("CACHE", "loading database from '"+dir+"' ...")

	var wg = &sync.WaitGroup{}
	var locker = sync.Mutex{}
	var lastErr error
	for i := 0; i < CountFileDB; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()

			var db = NewSQLiteFileListDB()
			dbErr := db.Open(dir + "/db-" + types.String(i) + ".db")
			if dbErr == nil {
				dbErr = db.Init()
			}

			// BUGFIX: lastErr was previously assigned outside the lock,
			// racing with the other shard goroutines; take the lock for
			// both the error and the dbList slot.
			locker.Lock()
			defer locker.Unlock()
			if dbErr != nil {
				lastErr = dbErr
				return
			}
			this.dbList[i] = db
		}(i)
	}
	wg.Wait()

	if lastErr != nil {
		return lastErr
	}

	// upgrade the legacy database in the background
	goman.New(func() {
		this.upgradeOldDB()
	})

	return nil
}
func (this *SQLiteFileList) Reset() error {
// 不做任何事情
return nil
}
func (this *SQLiteFileList) Add(hash string, item *Item) error {
var db = this.GetDB(hash)
if !db.IsReady() {
return nil
}
err := db.AddSync(hash, item)
if err != nil {
return err
}
this.memoryCache.Write(hash, zero.Zero{}, this.maxExpiresAtForMemoryCache(item.ExpiresAt))
if this.onAdd != nil {
this.onAdd(item)
}
return nil
}
// Exist reports whether a non-expired item with the given hash exists.
// It consults, in order: the per-shard hash map (definitive negative), the
// in-memory TTL cache (definitive positive), and finally the SQLite row.
// The size result is always -1 for the SQLite backend.
func (this *SQLiteFileList) Exist(hash string) (bool, int64, error) {
	var db = this.GetDB(hash)
	if !db.IsReady() {
		return false, -1, nil
	}

	// absent from the hash map => cannot exist
	if !db.hashMap.Exist(hash) {
		return false, -1, nil
	}

	var item = this.memoryCache.Read(hash)
	if item != nil {
		return true, -1, nil
	}

	var row = db.existsByHashStmt.QueryRow(hash, time.Now().Unix())
	if rowErr := row.Err(); rowErr != nil {
		// BUGFIX: the query error was previously swallowed and reported
		// as "not found"; surface it to the caller instead.
		return false, -1, rowErr
	}
	var expiredAt int64
	err := row.Scan(&expiredAt)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			err = nil
		}
		return false, -1, err
	}
	if expiredAt <= fasttime.Now().Unix() {
		return false, -1, nil
	}

	// cache the positive result to skip the SQL round trip next time
	this.memoryCache.Write(hash, zero.Zero{}, this.maxExpiresAtForMemoryCache(expiredAt))
	return true, -1, nil
}
func (this *SQLiteFileList) ExistQuick(hash string) (isReady bool, found bool) {
var db = this.GetDB(hash)
if !db.IsReady() || !db.HashMapIsLoaded() {
return
}
isReady = true
found = db.hashMap.Exist(hash)
return
}
// CleanPrefix 清理某个前缀的缓存数据
func (this *SQLiteFileList) CleanPrefix(prefix string) error {
if len(prefix) == 0 {
return nil
}
defer func() {
// TODO 需要优化
this.memoryCache.Clean()
}()
for _, db := range this.dbList {
err := db.CleanPrefix(prefix)
if err != nil {
return err
}
}
return nil
}
// CleanMatchKey 清理通配符匹配的缓存数据,类似于 https://*.example.com/hello
func (this *SQLiteFileList) CleanMatchKey(key string) error {
if len(key) == 0 {
return nil
}
defer func() {
// TODO 需要优化
this.memoryCache.Clean()
}()
for _, db := range this.dbList {
err := db.CleanMatchKey(key)
if err != nil {
return err
}
}
return nil
}
// CleanMatchPrefix 清理通配符匹配的缓存数据,类似于 https://*.example.com/prefix/
func (this *SQLiteFileList) CleanMatchPrefix(prefix string) error {
if len(prefix) == 0 {
return nil
}
defer func() {
// TODO 需要优化
this.memoryCache.Clean()
}()
for _, db := range this.dbList {
err := db.CleanMatchPrefix(prefix)
if err != nil {
return err
}
}
return nil
}
func (this *SQLiteFileList) Remove(hash string) error {
_, err := this.remove(hash, false)
return err
}
// Purge removes expired cache items.
// count caps the number of rows scanned per call (split evenly across the
// shard databases) so each invocation stays short.
// callback is invoked once per removed hash, after the item has been removed
// from the hash map and memory cache.
// It returns the number of expired items found.
func (this *SQLiteFileList) Purge(count int, callback func(hash string) error) (int, error) {
	count /= CountFileDB
	if count <= 0 {
		count = 100
	}

	var countFound = 0

	for _, db := range this.dbList {
		hashStrings, err := db.ListExpiredItems(count)
		if err != nil {
			// BUGFIX: this error was previously returned as nil,
			// silently hiding shard read failures.
			return 0, err
		}
		if len(hashStrings) == 0 {
			continue
		}
		countFound += len(hashStrings)

		// operate outside the rows.Next() loop to avoid deadlocks
		for _, hash := range hashStrings {
			_, err = this.remove(hash, true)
			if err != nil {
				return 0, err
			}

			err = callback(hash)
			if err != nil {
				return 0, err
			}
		}

		// hash values are hex digests in practice, so joining them into the
		// SQL literal is injection-safe — confirm if hashes ever change format
		_, err = db.writeDB.Exec(`DELETE FROM "cacheItems" WHERE "hash" IN ('` + strings.Join(hashStrings, "', '") + `')`)
		if err != nil {
			return 0, err
		}
	}

	return countFound, nil
}
func (this *SQLiteFileList) PurgeLFU(count int, callback func(hash string) error) error {
count /= CountFileDB
if count <= 0 {
count = 100
}
for _, db := range this.dbList {
hashStrings, err := db.ListLFUItems(count)
if err != nil {
return err
}
if len(hashStrings) == 0 {
continue
}
// 不在 rows.Next() 循环中操作是为了避免死锁
for _, hash := range hashStrings {
_, err = this.remove(hash, true)
if err != nil {
return err
}
err = callback(hash)
if err != nil {
return err
}
}
_, err = db.writeDB.Exec(`DELETE FROM "cacheItems" WHERE "hash" IN ('` + strings.Join(hashStrings, "', '") + `')`)
if err != nil {
return err
}
}
return nil
}
func (this *SQLiteFileList) CleanAll() error {
defer this.memoryCache.Clean()
for _, db := range this.dbList {
err := db.CleanAll()
if err != nil {
return err
}
}
return nil
}
func (this *SQLiteFileList) Stat(check func(hash string) bool) (*Stat, error) {
var result = &Stat{}
for _, db := range this.dbList {
if !db.IsReady() {
return &Stat{}, nil
}
// 这里不设置过期时间、不使用 check 函数,目的是让查询更快速一些
_ = check
var row = db.statStmt.QueryRow()
if row.Err() != nil {
return nil, row.Err()
}
var stat = &Stat{}
err := row.Scan(&stat.Count, &stat.Size, &stat.ValueSize)
if err != nil {
return nil, err
}
result.Count += stat.Count
result.Size += stat.Size
result.ValueSize += stat.ValueSize
}
return result, nil
}
// Count 总数量
// 常用的方法,所以避免直接查询数据库
func (this *SQLiteFileList) Count() (int64, error) {
var total int64
for _, db := range this.dbList {
count, err := db.Total()
if err != nil {
return 0, err
}
total += count
}
return total, nil
}
// IncreaseHit 增加点击量
func (this *SQLiteFileList) IncreaseHit(hash string) error {
var db = this.GetDB(hash)
if !db.IsReady() {
return nil
}
return db.IncreaseHitAsync(hash)
}
// OnAdd 添加事件
func (this *SQLiteFileList) OnAdd(f func(item *Item)) {
this.onAdd = f
}
// OnRemove 删除事件
func (this *SQLiteFileList) OnRemove(f func(item *Item)) {
this.onRemove = f
}
func (this *SQLiteFileList) Close() error {
this.memoryCache.Destroy()
var group = goman.NewTaskGroup()
for _, db := range this.dbList {
var dbCopy = db
group.Run(func() {
if dbCopy != nil {
_ = dbCopy.Close()
}
})
}
group.Wait()
return nil
}
func (this *SQLiteFileList) GetDBIndex(hash string) uint64 {
return fnv.HashString(hash) % CountFileDB
}
// GetDB returns the shard database responsible for the given hash.
// It reuses GetDBIndex so the shard-selection logic lives in one place.
func (this *SQLiteFileList) GetDB(hash string) *SQLiteFileListDB {
	return this.dbList[this.GetDBIndex(hash)]
}
func (this *SQLiteFileList) HashMapIsLoaded() bool {
for _, db := range this.dbList {
if !db.HashMapIsLoaded() {
return false
}
}
return true
}
func (this *SQLiteFileList) remove(hash string, isDeleted bool) (notFound bool, err error) {
var db = this.GetDB(hash)
if !db.IsReady() {
return false, nil
}
// HashMap中不存在则确定不存在
if !db.hashMap.Exist(hash) {
return true, nil
}
defer db.hashMap.Delete(hash)
// 从缓存中删除
this.memoryCache.Delete(hash)
if !isDeleted {
err = db.DeleteSync(hash)
if err != nil {
return false, db.WrapError(err)
}
}
if this.onRemove != nil {
// when remove file item, no any extra information needed
this.onRemove(nil)
}
return false, nil
}
// 升级老版本数据库
func (this *SQLiteFileList) upgradeOldDB() {
if len(this.oldDir) == 0 {
return
}
_ = this.UpgradeV3(this.oldDir, false)
}
// UpgradeV3 migrates cache items from the legacy v3 SQLite database
// (<oldDir>/index.db, table "cacheItems_v3") into this list.
// When brokenOnError is false, per-row scan/insert errors are skipped so a
// partially corrupted legacy database does not abort the whole migration.
// The legacy index.db file is removed once migration finishes (even on error),
// and the loop sleeps between pages to limit I/O pressure.
func (this *SQLiteFileList) UpgradeV3(oldDir string, brokenOnError bool) error {
	// index.db
	var indexDBPath = oldDir + "/index.db"
	_, err := os.Stat(indexDBPath)
	if err != nil {
		// no legacy database => nothing to upgrade
		return nil
	}

	remotelogs.Println("CACHE", "upgrading local database from '"+oldDir+"' ...")
	defer func() {
		_ = fsutils.Remove(indexDBPath)
		remotelogs.Println("CACHE", "upgrading local database finished")
	}()

	db, err := dbs.OpenWriter("file:" + indexDBPath + "?cache=shared&mode=rwc&_journal_mode=WAL&_sync=" + dbs.SyncMode + "&_locking_mode=EXCLUSIVE")
	if err != nil {
		return err
	}
	defer func() {
		_ = db.Close()
	}()

	var isFinished = false
	var offset = 0
	var count = 10000
	for {
		if isFinished {
			break
		}

		// process one page of legacy rows; the closure scopes the rows'
		// lifetime so they are closed before the sleep below
		err = func() error {
			defer func() {
				offset += count
			}()

			rows, err := db.Query(`SELECT "hash", "key", "headerSize", "bodySize", "metaSize", "expiredAt", "staleAt", "createdAt", "host", "serverId" FROM "cacheItems_v3" ORDER BY "id" ASC LIMIT ?, ?`, offset, count)
			if err != nil {
				return err
			}
			defer func() {
				_ = rows.Close()
			}()

			var hash = ""
			var key = ""
			var headerSize int64
			var bodySize int64
			var metaSize int64
			var expiredAt int64
			var staleAt int64
			var createdAt int64
			var host string
			var serverId int64

			// assume finished until at least one row is seen
			isFinished = true

			for rows.Next() {
				isFinished = false

				err = rows.Scan(&hash, &key, &headerSize, &bodySize, &metaSize, &expiredAt, &staleAt, &createdAt, &host, &serverId)
				if err != nil {
					if brokenOnError {
						return err
					}
					// skip the rest of this page on scan failure
					return nil
				}
				err = this.Add(hash, &Item{
					Type:       ItemTypeFile,
					Key:        key,
					ExpiresAt:  expiredAt,
					StaleAt:    staleAt,
					HeaderSize: headerSize,
					BodySize:   bodySize,
					MetaSize:   metaSize,
					Host:       host,
					ServerId:   serverId,
				})
				if err != nil {
					if brokenOnError {
						return err
					}
				}
			}

			return nil
		}()
		if err != nil {
			return err
		}

		// throttle between pages to limit disk pressure
		time.Sleep(1 * time.Second)
	}
	return nil
}
// maxExpiresAtForMemoryCache caps an expiration timestamp so memory-cache
// entries live for at most one hour from now.
func (this *SQLiteFileList) maxExpiresAtForMemoryCache(expiresAt int64) int64 {
	var ceiling = fasttime.Now().Unix() + 3600
	if expiresAt <= ceiling {
		return expiresAt
	}
	return ceiling
}

View File

@@ -0,0 +1,447 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"strconv"
"sync"
"testing"
"time"
)
// TestFileList_Init verifies that the SQLite file list opens its shard
// databases successfully.
func TestFileList_Init(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
	// BUGFIX: the list was deferred for Close twice, closing it a second
	// time on test exit; a single deferred Close is sufficient.
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")
}
func TestFileList_Add(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
defer func() {
_ = list.Close()
}()
var hash = stringutil.Md5("123456")
t.Log("db index:", list.GetDBIndex(hash))
err = list.Add(hash, &caches.Item{
Key: "123456",
ExpiresAt: time.Now().Unix() + 1,
HeaderSize: 1,
MetaSize: 2,
BodySize: 3,
Host: "teaos.cn",
ServerId: 1,
})
if err != nil {
t.Fatal(err)
}
t.Log(list.Exist(hash))
t.Log("ok")
}
// TestFileList_Add_Many bulk-inserts items and reports insertion throughput
// every 10k rows.
func TestFileList_Add_Many(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
	// ensure the databases are closed when the test ends (was missing)
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}

	var before = time.Now()
	const offset = 0
	const count = 1_000_000
	for i := offset; i < offset+count; i++ {
		u := "https://edge.teaos.cn/123456" + strconv.Itoa(i)
		// BUGFIX: the Add error was discarded with `_ =`, so the check below
		// only ever re-tested the stale Init error; capture it properly.
		err = list.Add(stringutil.Md5(u), &caches.Item{
			Key:        u,
			ExpiresAt:  time.Now().Unix() + 3600,
			HeaderSize: 1,
			MetaSize:   2,
			BodySize:   3,
		})
		if err != nil {
			t.Fatal(err)
		}

		if i > 0 && i%10_000 == 0 {
			t.Log(i, int(10000/time.Since(before).Seconds()), "qps")
			before = time.Now()
		}
	}
	t.Log("ok")
}
func TestFileList_Exist(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
total, _ := list.Count()
t.Log("total:", total)
var before = time.Now()
defer func() {
t.Log(time.Since(before).Seconds()*1000, "ms")
}()
{
var hash = stringutil.Md5("123456")
exists, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "exists:", exists)
}
{
var hash = stringutil.Md5("http://edge.teaos.cn/1234561")
exists, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "exists:", exists)
}
}
func TestFileList_Exist_Many_DB(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
// 测试在多个数据库下的性能
var listSlice = []caches.ListInterface{}
for i := 1; i <= 10; i++ {
var list = caches.NewSQLiteFileList(Tea.Root + "/data/data" + strconv.Itoa(i))
err := list.Init()
if err != nil {
t.Fatal(err)
}
listSlice = append(listSlice, list)
}
defer func() {
for _, list := range listSlice {
_ = list.Close()
}
}()
var wg = sync.WaitGroup{}
var threads = 8
wg.Add(threads)
var count = 200_000
var countLocker sync.Mutex
var tasks = make(chan int, count)
for i := 0; i < count; i++ {
tasks <- i
}
var hash = stringutil.Md5("http://edge.teaos.cn/1234561")
before := time.Now()
defer func() {
t.Log(time.Since(before).Seconds()*1000, "ms")
}()
for i := 0; i < threads; i++ {
goman.New(func() {
defer wg.Done()
for {
select {
case <-tasks:
countLocker.Lock()
count--
countLocker.Unlock()
var list = listSlice[rands.Int(0, len(listSlice)-1)]
_, _, _ = list.Exist(hash)
default:
return
}
}
})
}
wg.Wait()
t.Log("left:", count)
}
func TestFileList_CleanPrefix(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
before := time.Now()
err = list.CleanPrefix("123")
if err != nil {
t.Fatal(err)
}
t.Log(time.Since(before).Seconds()*1000, "ms")
}
func TestFileList_Remove(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
list.OnRemove(func(item *caches.Item) {
t.Logf("remove %#v", item)
})
err = list.Remove(stringutil.Md5("123456"))
if err != nil {
t.Fatal(err)
}
t.Log("ok")
t.Log("===count===")
t.Log(list.Count())
}
func TestFileList_Purge(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var count = 0
_, err = list.Purge(caches.CountFileDB*2, func(hash string) error {
t.Log(hash)
count++
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("ok, purged", count, "items")
}
func TestFileList_PurgeLFU(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var count = 0
err = list.PurgeLFU(caches.CountFileDB*2, func(hash string) error {
t.Log(hash)
count++
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("ok, purged", count, "items")
}
func TestFileList_Stat(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
stat, err := list.Stat(nil)
if err != nil {
t.Fatal(err)
}
t.Log("count:", stat.Count, "size:", stat.Size, "valueSize:", stat.ValueSize)
}
func TestFileList_Count(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var before = time.Now()
count, err := list.Count()
if err != nil {
t.Fatal(err)
}
t.Log("count:", count)
t.Log(time.Since(before).Seconds()*1000, "ms")
}
func TestFileList_CleanAll(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
err = list.CleanAll()
if err != nil {
t.Fatal(err)
}
t.Log("ok")
t.Log(list.Count())
}
func TestFileList_UpgradeV3(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p43").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
defer func() {
_ = list.Close()
}()
err = list.UpgradeV3("/Users/WorkSpace/EdgeProject/EdgeCache/p43", false)
if err != nil {
t.Log(err)
return
}
t.Log("ok")
}
func BenchmarkFileList_Exist(b *testing.B) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, _ = list.Exist("f0eb5b87e0b0041f3917002c0707475f" + types.String(i))
}
}

View File

@@ -0,0 +1,56 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// ListInterface is the contract shared by all cache item list backends
// (memory, SQLite, KV file store).
type ListInterface interface {
	// Init initializes the list
	Init() error

	// Reset clears the list's in-memory state
	Reset() error

	// Add stores an item under its hash
	Add(hash string, item *Item) error

	// Exist reports whether an item exists
	Exist(hash string) (ok bool, size int64, err error)

	// CleanPrefix removes cached items whose key starts with the prefix
	CleanPrefix(prefix string) error

	// CleanMatchKey removes items matching a wildcard key such as https://*.example.com/hello
	CleanMatchKey(key string) error

	// CleanMatchPrefix removes items matching a wildcard prefix such as https://*.example.com/prefix/
	CleanMatchPrefix(prefix string) error

	// Remove removes one item by hash
	Remove(hash string) error

	// Purge removes expired items, invoking callback per removed hash
	Purge(count int, callback func(hash string) error) (int, error)

	// PurgeLFU removes least-used items, invoking callback per removed hash
	PurgeLFU(count int, callback func(hash string) error) error

	// CleanAll removes all cached items
	CleanAll() error

	// Stat returns aggregate statistics
	Stat(check func(hash string) bool) (*Stat, error)

	// Count returns the total number of items
	Count() (int64, error)

	// OnAdd registers the handler invoked when an item is added
	OnAdd(f func(item *Item))

	// OnRemove registers the handler invoked when an item is removed
	OnRemove(f func(item *Item))

	// Close closes the list
	Close() error

	// IncreaseHit increases an item's hit counter
	IncreaseHit(hash string) error
}

View File

@@ -0,0 +1,437 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/configutils"
"github.com/iwind/TeaGo/logs"
"net"
"net/url"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
)
// MemoryList 内存缓存列表管理
type MemoryList struct {
count int64
itemMaps map[string]map[string]*Item // prefix => { hash => item }
prefixes []string
locker sync.RWMutex
onAdd func(item *Item)
onRemove func(item *Item)
purgeIndex int
}
func NewMemoryList() ListInterface {
return &MemoryList{
itemMaps: map[string]map[string]*Item{},
}
}
func (this *MemoryList) Init() error {
this.prefixes = []string{"000"}
for i := 100; i <= 999; i++ {
this.prefixes = append(this.prefixes, strconv.Itoa(i))
}
for _, prefix := range this.prefixes {
this.itemMaps[prefix] = map[string]*Item{}
}
return nil
}
// Reset drops every stored item and zeroes the item counter.
func (this *MemoryList) Reset() error {
	this.locker.Lock()
	for prefix := range this.itemMaps {
		this.itemMaps[prefix] = make(map[string]*Item)
	}
	this.locker.Unlock()
	atomic.StoreInt64(&this.count, 0)
	return nil
}
// Add stores item under hash. When an item already exists under the same
// hash, the onRemove callback fires for the old item first so external
// statistics stay balanced; otherwise the item counter is incremented.
func (this *MemoryList) Add(hash string, item *Item) error {
	this.locker.Lock()

	prefix := this.prefix(hash)
	itemMap, ok := this.itemMaps[prefix]
	if !ok {
		itemMap = map[string]*Item{}
		this.itemMaps[prefix] = itemMap
	}

	// remove the old item first so stats callbacks see a remove+add pair
	oldItem, ok := itemMap[hash]
	if ok {
		// callback
		if this.onRemove != nil {
			this.onRemove(oldItem)
		}
	} else {
		atomic.AddInt64(&this.count, 1)
	}

	// add
	if this.onAdd != nil {
		this.onAdd(item)
	}
	itemMap[hash] = item
	this.locker.Unlock()
	return nil
}
// Exist reports whether the item stored under hash exists and has not
// expired. The size result is always -1: the memory list does not report
// per-item sizes here.
func (this *MemoryList) Exist(hash string) (bool, int64, error) {
	this.locker.RLock()
	defer this.locker.RUnlock()

	if itemMap, found := this.itemMaps[this.prefix(hash)]; found {
		if item, has := itemMap[hash]; has {
			return !item.IsExpired(), -1, nil
		}
	}
	return false, -1, nil
}
// CleanPrefix invalidates every item whose key starts with prefix by
// zeroing its expiration time (lazy deletion: entries are reclaimed later
// by Purge).
//
// NOTE(review): items are mutated while holding only the read lock, which
// races with concurrent readers of ExpiresAt; presumably tolerated as a
// benign int64 race — confirm before expecting -race cleanliness.
func (this *MemoryList) CleanPrefix(prefix string) error {
	this.locker.RLock()
	defer this.locker.RUnlock()
	// TODO optimize so tens of millions of items can be handled in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if strings.HasPrefix(item.Key, prefix) {
				item.ExpiresAt = 0
			}
		}
	}
	return nil
}

// CleanMatchKey invalidates items matching a wildcard key such as
// https://*.example.com/hello . Keys containing the "all" suffix are
// ignored; the host part may match a wildcard domain.
func (this *MemoryList) CleanMatchKey(key string) error {
	if strings.Contains(key, SuffixAll) {
		return nil
	}

	u, err := url.Parse(key)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}
	var requestURI = u.RequestURI()

	this.locker.RLock()
	defer this.locker.RUnlock()

	// TODO optimize so tens of millions of items can be handled in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if configutils.MatchDomain(host, item.Host) {
				var itemRequestURI = item.RequestURI()
				// exact URI match, or the same URI followed by the "all" suffix
				if itemRequestURI == requestURI || strings.HasPrefix(itemRequestURI, requestURI+SuffixAll) {
					item.ExpiresAt = 0
				}
			}
		}
	}

	return nil
}

// CleanMatchPrefix invalidates items matching a wildcard prefix such as
// https://*.example.com/prefix/ . A root path ("/") matches every item on
// the matched host.
func (this *MemoryList) CleanMatchPrefix(prefix string) error {
	u, err := url.Parse(prefix)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}
	var requestURI = u.RequestURI()
	var isRootPath = requestURI == "/"

	this.locker.RLock()
	defer this.locker.RUnlock()

	// TODO optimize so tens of millions of items can be handled in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if configutils.MatchDomain(host, item.Host) {
				var itemRequestURI = item.RequestURI()
				if isRootPath || strings.HasPrefix(itemRequestURI, requestURI) {
					item.ExpiresAt = 0
				}
			}
		}
	}

	return nil
}
// Remove deletes the item stored under hash (if present), firing the
// onRemove callback and decrementing the item counter.
func (this *MemoryList) Remove(hash string) error {
	this.locker.Lock()
	defer this.locker.Unlock()

	itemMap, found := this.itemMaps[this.prefix(hash)]
	if !found {
		return nil
	}

	if item, has := itemMap[hash]; has {
		if this.onRemove != nil {
			this.onRemove(item)
		}
		atomic.AddInt64(&this.count, -1)
		delete(itemMap, hash)
	}
	return nil
}
// Purge scans one prefix shard (rotating via purgeIndex) and removes
// expired items.
//
//	count    - maximum number of entries to examine, bounding time per call
//	callback - invoked outside the lock for every removed hash
//
// It returns the number of removed items. Note that count is decremented
// for every visited entry, expired or not.
func (this *MemoryList) Purge(count int, callback func(hash string) error) (int, error) {
	this.locker.Lock()

	var deletedHashList = []string{}

	// wrap the round-robin cursor back to the first shard
	if this.purgeIndex >= len(this.prefixes) {
		this.purgeIndex = 0
	}
	var prefix = this.prefixes[this.purgeIndex]
	this.purgeIndex++
	itemMap, ok := this.itemMaps[prefix]
	if !ok {
		this.locker.Unlock()
		return 0, nil
	}

	var countFound = 0
	for hash, item := range itemMap {
		if count <= 0 {
			break
		}

		if item.IsExpired() {
			if this.onRemove != nil {
				this.onRemove(item)
			}
			atomic.AddInt64(&this.count, -1)
			delete(itemMap, hash)
			deletedHashList = append(deletedHashList, hash)
			countFound++
		}

		count--
	}
	this.locker.Unlock()

	// run callbacks outside the lock
	for _, hash := range deletedHashList {
		if callback != nil {
			err := callback(hash)
			if err != nil {
				return 0, err
			}
		}
	}
	return countFound, nil
}

// PurgeLFU evicts up to count items, preferring entries not used in the
// current or previous week. After three full rounds the recency restriction
// is dropped so the requested quota can still be met. At most one item is
// taken from each shard per round.
func (this *MemoryList) PurgeLFU(count int, callback func(hash string) error) error {
	if count <= 0 {
		return nil
	}

	var deletedHashList = []string{}
	var week = currentWeek()
	var round = 0
	this.locker.Lock()

Loop:
	for {
		var found = false
		round++
		for _, itemMap := range this.itemMaps {
			for hash, item := range itemMap {
				found = true

				// skip recently-used items during the first three rounds only
				if week-item.Week <= 1 /** recently used **/ && round <= 3 /** stop restricting once too many rounds did not fill the quota **/ {
					continue
				}

				if this.onRemove != nil {
					this.onRemove(item)
				}
				atomic.AddInt64(&this.count, -1)
				delete(itemMap, hash)
				deletedHashList = append(deletedHashList, hash)

				count--
				if count <= 0 {
					break Loop
				}

				// at most one item per shard per round
				break
			}
		}

		if !found {
			break
		}
	}

	this.locker.Unlock()

	// run callbacks outside the lock
	for _, hash := range deletedHashList {
		if callback != nil {
			err := callback(hash)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// CleanAll removes every item (alias for Reset).
func (this *MemoryList) CleanAll() error {
	return this.Reset()
}

// Stat aggregates count and sizes over all non-expired items for which
// check(hash) returns true; check typically verifies that the backing
// cache file still exists and is intact.
func (this *MemoryList) Stat(check func(hash string) bool) (*Stat, error) {
	this.locker.RLock()
	defer this.locker.RUnlock()

	result := &Stat{
		Count: 0,
		Size:  0,
	}
	for _, itemMap := range this.itemMaps {
		for hash, item := range itemMap {
			if !item.IsExpired() {
				// verify the backing file exists, content is correct, etc.
				if check != nil && check(hash) {
					result.Count++
					result.ValueSize += item.Size()
					result.Size += item.TotalSize()
				}
			}
		}
	}
	return result, nil
}

// Count returns the total number of items.
func (this *MemoryList) Count() (int64, error) {
	var count = atomic.LoadInt64(&this.count)
	return count, nil
}

// OnAdd registers the callback fired whenever an item is added.
func (this *MemoryList) OnAdd(f func(item *Item)) {
	this.onAdd = f
}

// OnRemove registers the callback fired whenever an item is removed.
func (this *MemoryList) OnRemove(f func(item *Item)) {
	this.onRemove = f
}

// Close is a no-op for the in-memory list.
func (this *MemoryList) Close() error {
	return nil
}

// IncreaseHit marks the item as used in the current week so PurgeLFU will
// prefer to keep it.
func (this *MemoryList) IncreaseHit(hash string) error {
	this.locker.Lock()
	itemMap, ok := this.itemMaps[this.prefix(hash)]
	if !ok {
		this.locker.Unlock()
		return nil
	}
	item, ok := itemMap[hash]
	if ok {
		item.Week = currentWeek()
	}
	this.locker.Unlock()
	return nil
}
// Prefixes exposes the shard prefixes (used by tests).
func (this *MemoryList) Prefixes() []string {
	return this.prefixes
}

// ItemMaps exposes the raw shard maps (used by tests).
// NOTE(review): returned without locking — safe only when no concurrent
// mutation happens, as in tests.
func (this *MemoryList) ItemMaps() map[string]map[string]*Item {
	return this.itemMaps
}

// PurgeIndex exposes the current round-robin purge cursor (used by tests).
func (this *MemoryList) PurgeIndex() int {
	return this.purgeIndex
}

// Print dumps every non-empty shard as JSON (used by tests).
func (this *MemoryList) Print(t *testing.T) {
	this.locker.Lock()
	for _, itemMap := range this.itemMaps {
		if len(itemMap) > 0 {
			logs.PrintAsJSON(itemMap, t)
		}
	}
	this.locker.Unlock()
}

// prefix maps a hash to its shard prefix: the first three characters, or
// the whole hash when it is three characters or shorter. Prefixes not
// present in itemMaps fall back to the "000" shard. Callers hold the
// locker, since this reads itemMaps.
func (this *MemoryList) prefix(hash string) string {
	var prefix string
	if len(hash) > 3 {
		prefix = hash[:3]
	} else {
		prefix = hash
	}
	_, ok := this.itemMaps[prefix]
	if !ok {
		prefix = "000"
	}
	return prefix
}

View File

@@ -0,0 +1,327 @@
package caches_test
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/cespare/xxhash/v2"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/rand"
"sort"
"strconv"
"testing"
"time"
)
// TestMemoryList_Add verifies shard assignment and counting after adds.
func TestMemoryList_Add(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("123456", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	t.Log(list.Prefixes())
	logs.PrintAsJSON(list.ItemMaps(), t)
	t.Log(list.Count())
}

// TestMemoryList_Remove checks that removal updates the maps and counter.
func TestMemoryList_Remove(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Remove("b")
	list.Print(t)
	t.Log(list.Count())
}

// TestMemoryList_Purge exercises expired-item purging and cursor rotation.
func TestMemoryList_Purge(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("c", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix() - 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("d", &caches.Item{
		Key:        "d1",
		ExpiresAt:  time.Now().Unix() - 2,
		HeaderSize: 1024,
	})
	_, _ = list.Purge(100, func(hash string) error {
		t.Log("delete:", hash)
		return nil
	})
	list.Print(t)

	for i := 0; i < 1000; i++ {
		_, _ = list.Purge(100, func(hash string) error {
			t.Log("delete:", hash)
			return nil
		})
		t.Log(list.PurgeIndex())
	}

	t.Log(list.Count())
}

// TestMemoryList_Purge_Large_List fills a large list; the million-item run
// and the sleep only happen in single-test mode.
func TestMemoryList_Purge_Large_List(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()

	var count = 100
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for i := 0; i < count; i++ {
		_ = list.Add("a"+strconv.Itoa(i), &caches.Item{
			Key:        "a" + strconv.Itoa(i),
			ExpiresAt:  time.Now().Unix() + int64(rands.Int(0, 24*3600)),
			HeaderSize: 1024,
		})
	}

	if testutils.IsSingleTesting() {
		time.Sleep(1 * time.Hour)
	}
}
// TestMemoryList_Stat aggregates stats with a randomized check function.
func TestMemoryList_Stat(t *testing.T) {
	list := caches.NewMemoryList()
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("c", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix(),
		HeaderSize: 1024,
	})
	_ = list.Add("d", &caches.Item{
		Key:        "d1",
		ExpiresAt:  time.Now().Unix() - 2,
		HeaderSize: 1024,
	})
	result, _ := list.Stat(func(hash string) bool {
		// random check
		return rand.Int()%2 == 0
	})
	t.Log(result)
}

// TestMemoryList_CleanPrefix measures prefix invalidation over a large set.
func TestMemoryList_CleanPrefix(t *testing.T) {
	list := caches.NewMemoryList()
	_ = list.Init()
	before := time.Now()

	var count = 100
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for i := 0; i < count; i++ {
		key := "https://www.teaos.cn/hello/" + strconv.Itoa(i/10000) + "/" + strconv.Itoa(i) + ".html"
		_ = list.Add(fmt.Sprintf("%d", xxhash.Sum64String(key)), &caches.Item{
			Key:        key,
			ExpiresAt:  time.Now().Unix() + 3600,
			BodySize:   0,
			HeaderSize: 0,
		})
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")

	before = time.Now()
	err := list.CleanPrefix("https://www.teaos.cn/hello/10")
	if err != nil {
		t.Fatal(err)
	}
	logs.Println(list.Stat(func(hash string) bool {
		return true
	}))
	t.Log(time.Since(before).Seconds()*1000, "ms")
}

// TestMapRandomDelete observes the distribution of Go's random map
// iteration order when deleting a fixed number of keys per pass.
func TestMapRandomDelete(t *testing.T) {
	var countMap = map[int]int{} // k => count

	var count = 1000
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for j := 0; j < count; j++ {
		var m = map[int]bool{}
		for i := 0; i < 100; i++ {
			m[i] = true
		}

		var count = 0
		for k := range m {
			delete(m, k)
			count++
			if count >= 10 {
				break
			}
		}

		for k := range m {
			countMap[k]++
		}
	}

	var counts = []int{}
	for _, count := range countMap {
		counts = append(counts, count)
	}
	sort.Ints(counts)
	t.Log("["+types.String(len(counts))+"]", counts)
}

// TestMemoryList_PurgeLFU evicts items without any recorded hits.
func TestMemoryList_PurgeLFU(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)

	var before = time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()

	_ = list.Add("1", &caches.Item{})
	_ = list.Add("2", &caches.Item{})
	_ = list.Add("3", &caches.Item{})
	_ = list.Add("4", &caches.Item{})
	_ = list.Add("5", &caches.Item{})

	//_ = list.IncreaseHit("1")
	//_ = list.IncreaseHit("2")
	//_ = list.IncreaseHit("3")
	//_ = list.IncreaseHit("4")
	//_ = list.IncreaseHit("5")

	count, err := list.Count()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("count items before purge:", count)

	err = list.PurgeLFU(5, func(hash string) error {
		t.Log("purge lfu:", hash)
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")

	count, err = list.Count()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("count items left:", count)
}

// TestMemoryList_CleanAll verifies CleanAll drops everything.
func TestMemoryList_CleanAll(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Add("a", &caches.Item{})
	_ = list.CleanAll()
	logs.PrintAsJSON(list.ItemMaps(), t)
	t.Log(list.Count())
}

// TestMemoryList_GC fills and clears a million items to observe GC
// behavior; only runs in single-test mode.
func TestMemoryList_GC(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	for i := 0; i < 1_000_000; i++ {
		key := "https://www.teaos.cn/hello" + strconv.Itoa(i/100000) + "/" + strconv.Itoa(i) + ".html"
		_ = list.Add(fmt.Sprintf("%d", xxhash.Sum64String(key)), &caches.Item{
			Key:        key,
			ExpiresAt:  0,
			BodySize:   0,
			HeaderSize: 0,
		})
	}

	t.Log("clean...", len(list.ItemMaps()))
	_ = list.CleanAll()
	t.Log("cleanAll...", len(list.ItemMaps()))

	before := time.Now()
	//runtime.GC()
	t.Log("gc cost:", time.Since(before).Seconds()*1000, "ms")

	if testutils.IsSingleTesting() {
		timeout := time.NewTimer(2 * time.Minute)
		<-timeout.C
		t.Log("2 minutes passed")
		time.Sleep(30 * time.Minute)
	}
}

// BenchmarkMemoryList stresses concurrent Exist/Add/Purge on a prefilled list.
func BenchmarkMemoryList(b *testing.B) {
	var list = caches.NewMemoryList()
	err := list.Init()
	if err != nil {
		b.Fatal(err)
	}

	for i := 0; i < 1_000_000; i++ {
		_ = list.Add(stringutil.Md5(types.String(i)), &caches.Item{
			Key:        "a1",
			ExpiresAt:  time.Now().Unix() + 3600,
			HeaderSize: 1024,
		})
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, _, _ = list.Exist(types.String("a" + types.String(rands.Int(1, 10000))))
			_ = list.Add("a"+types.String(rands.Int(1, 100000)), &caches.Item{})
			_, _ = list.Purge(1000, func(hash string) error {
				return nil
			})
		}
	})
}

View File

@@ -0,0 +1,267 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/events"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/iwind/TeaGo/lists"
"github.com/iwind/TeaGo/types"
"strconv"
"sync"
)
// SharedManager is the global cache policy manager instance.
var SharedManager = NewManager()

func init() {
	if !teaconst.IsMain {
		return
	}

	// stop all cache storages gracefully when the node shuts down
	events.OnClose(func() {
		remotelogs.Println("CACHE", "quiting cache manager")
		SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{})
	})
}

// Manager owns all HTTP cache policies and their storage instances.
type Manager struct {
	// global configuration
	MaxDiskCapacity   *shared.SizeCapacity      // overall disk capacity limit
	MainDiskDir       string                    // primary cache directory
	SubDiskDirs       []*serverconfigs.CacheDir // additional cache directories
	MaxMemoryCapacity *shared.SizeCapacity      // overall memory capacity limit

	CountFileStorages   int // number of file-backed storages
	CountMemoryStorages int // number of storages counted for memory budgeting (see UpdatePolicies)

	policyMap  map[int64]*serverconfigs.HTTPCachePolicy // policyId => *Policy
	storageMap map[int64]StorageInterface               // policyId => *Storage
	locker     sync.RWMutex                             // guards policyMap and storageMap
}

// NewManager creates an empty manager.
func NewManager() *Manager {
	var m = &Manager{
		policyMap:  map[int64]*serverconfigs.HTTPCachePolicy{},
		storageMap: map[int64]StorageInterface{},
	}
	return m
}
// UpdatePolicies replaces the active set of cache policies: storages whose
// policy disappeared are stopped, new policies are initialized and given a
// storage, and storages whose policy changed are hot-updated when supported
// or otherwise restarted. Finally the storage counters are refreshed.
func (this *Manager) UpdatePolicies(newPolicies []*serverconfigs.HTTPCachePolicy) {
	this.locker.Lock()
	defer this.locker.Unlock()

	var newPolicyIds = []int64{}
	for _, policy := range newPolicies {
		// use the node's own cache directories
		policy.UpdateDiskDir(this.MainDiskDir, this.SubDiskDirs)

		newPolicyIds = append(newPolicyIds, policy.Id)
	}

	// stop policies that are no longer present
	for _, oldPolicy := range this.policyMap {
		if !lists.ContainsInt64(newPolicyIds, oldPolicy.Id) {
			remotelogs.Println("CACHE", "remove policy "+strconv.FormatInt(oldPolicy.Id, 10))
			delete(this.policyMap, oldPolicy.Id)
			storage, ok := this.storageMap[oldPolicy.Id]
			if ok {
				storage.Stop()
				delete(this.storageMap, oldPolicy.Id)
			}
		}
	}

	// register new/updated policies
	for _, newPolicy := range newPolicies {
		_, ok := this.policyMap[newPolicy.Id]
		if !ok {
			remotelogs.Println("CACHE", "add policy "+strconv.FormatInt(newPolicy.Id, 10))
		}

		// initialize
		err := newPolicy.Init()
		if err != nil {
			remotelogs.Error("CACHE", "UpdatePolicies: init policy error: "+err.Error())
			continue
		}

		this.policyMap[newPolicy.Id] = newPolicy
	}

	// start or update storages
	for _, policy := range this.policyMap {
		storage, ok := this.storageMap[policy.Id]
		if !ok {
			storage = this.NewStorageWithPolicy(policy)
			if storage == nil {
				remotelogs.Error("CACHE", "can not find storage type '"+policy.Type+"'")
				continue
			}
			err := storage.Init()
			if err != nil {
				remotelogs.Error("CACHE", "UpdatePolicies: init storage failed: "+err.Error())
				continue
			}
			this.storageMap[policy.Id] = storage
		} else {
			// check whether the policy changed
			if !storage.Policy().IsSame(policy) {
				// hot-update in place when the storage supports it
				if storage.CanUpdatePolicy(policy) {
					err := policy.Init()
					if err != nil {
						remotelogs.Error("CACHE", "reload policy '"+types.String(policy.Id)+"' failed: init policy failed: "+err.Error())
						continue
					}
					remotelogs.Println("CACHE", "reload policy '"+types.String(policy.Id)+"'")
					storage.UpdatePolicy(policy)
					continue
				}

				remotelogs.Println("CACHE", "restart policy '"+types.String(policy.Id)+"'")

				// stop the old storage
				storage.Stop()
				delete(this.storageMap, policy.Id)

				// start a new one
				storage = this.NewStorageWithPolicy(policy)
				if storage == nil {
					remotelogs.Error("CACHE", "can not find storage type '"+policy.Type+"'")
					continue
				}
				err := storage.Init()
				if err != nil {
					remotelogs.Error("CACHE", "UpdatePolicies: init storage failed: "+err.Error())
					continue
				}
				this.storageMap[policy.Id] = storage
			}
		}
	}

	// refresh counters
	// NOTE(review): CountMemoryStorages is incremented for EVERY storage,
	// file-backed ones included — presumably because file storages embed a
	// memory layer that participates in memory budgeting; confirm intended.
	this.CountFileStorages = 0
	this.CountMemoryStorages = 0
	for _, storage := range this.storageMap {
		_, isFileStorage := storage.(*FileStorage)
		this.CountMemoryStorages++
		if isFileStorage {
			this.CountFileStorages++
		}
	}
}
// FindPolicy returns the policy registered under policyId, or nil.
func (this *Manager) FindPolicy(policyId int64) *serverconfigs.HTTPCachePolicy {
	this.locker.RLock()
	defer this.locker.RUnlock()
	return this.policyMap[policyId]
}

// FindStorageWithPolicy returns the storage serving policyId, or nil.
func (this *Manager) FindStorageWithPolicy(policyId int64) StorageInterface {
	this.locker.RLock()
	defer this.locker.RUnlock()
	return this.storageMap[policyId]
}

// NewStorageWithPolicy constructs a storage matching the policy's type, or
// nil for an unknown type.
func (this *Manager) NewStorageWithPolicy(policy *serverconfigs.HTTPCachePolicy) StorageInterface {
	switch policy.Type {
	case serverconfigs.CachePolicyStorageFile:
		return NewFileStorage(policy)
	case serverconfigs.CachePolicyStorageMemory:
		return NewMemoryStorage(policy, nil)
	}
	return nil
}

// StorageMap exposes the live storage map.
// NOTE(review): returned without locking — callers must not mutate it and
// should expect concurrent updates; presumably intended for tests/metrics.
func (this *Manager) StorageMap() map[int64]StorageInterface {
	return this.storageMap
}
// TotalMemorySize sums the memory consumption reported by every managed
// storage.
func (this *Manager) TotalMemorySize() int64 {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var total int64
	for _, s := range this.storageMap {
		total += s.TotalMemorySize()
	}
	return total
}
// FindAllCachePaths returns the configured "dir" option of every
// file-type cache policy; policies without a non-empty directory option
// are skipped.
//
// This method only reads policyMap, so a read lock suffices (consistent
// with FindPolicy); the previous write lock needlessly blocked concurrent
// readers.
func (this *Manager) FindAllCachePaths() []string {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var result = []string{}
	for _, policy := range this.policyMap {
		if policy.Type != serverconfigs.CachePolicyStorageFile || policy.Options == nil {
			continue
		}
		dir, ok := policy.Options["dir"]
		if !ok {
			continue
		}
		var dirString = types.String(dir)
		if len(dirString) > 0 {
			result = append(result, dirString)
		}
	}
	return result
}
// FindAllStorages returns a snapshot slice of all running storages.
//
// Reading storageMap only requires a read lock (the previous write lock
// blocked concurrent lookups); the slice is pre-sized to avoid growth.
func (this *Manager) FindAllStorages() []StorageInterface {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var storages = make([]StorageInterface, 0, len(this.storageMap))
	for _, storage := range this.storageMap {
		storages = append(storages, storage)
	}
	return storages
}
// ScanGarbageCaches walks every file storage and reports orphaned cache
// files (files no longer referenced by the index) to callback.
func (this *Manager) ScanGarbageCaches(callback func(path string) error) error {
	var storages = this.FindAllStorages()
	for _, storage := range storages {
		fileStorage, ok := storage.(*FileStorage)
		if !ok {
			continue
		}
		err := fileStorage.ScanGarbageCaches(callback)
		if err != nil {
			return err
		}
	}
	return nil
}

// MaxSystemMemoryBytesPerStorage returns the memory budget for a single
// storage: one third of system memory divided across all memory-counted
// storages, but never less than 1 GiB.
func (this *Manager) MaxSystemMemoryBytesPerStorage() int64 {
	var count = this.CountMemoryStorages
	if count < 1 {
		count = 1
	}

	var resultBytes = int64(memutils.SystemMemoryBytes()) / 3 / int64(count) // 1/3 of the system memory
	if resultBytes < 1<<30 {
		resultBytes = 1 << 30
	}
	return resultBytes
}

View File

@@ -0,0 +1,133 @@
package caches_test
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/Tea"
"testing"
)
// TestManager_UpdatePolicies applies successive policy sets and prints the
// resulting storages after each update (add, keep, replace).
func TestManager_UpdatePolicies(t *testing.T) {
	{
		var policies = []*serverconfigs.HTTPCachePolicy{}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}
	{
		var policies = []*serverconfigs.HTTPCachePolicy{
			{
				Id:   1,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   2,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   3,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
		}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}
	{
		var policies = []*serverconfigs.HTTPCachePolicy{
			{
				Id:   1,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   2,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   4,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
		}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}
}

// TestManager_ChangePolicy_Memory changes a memory policy's capacity in place.
func TestManager_ChangePolicy_Memory(t *testing.T) {
	var policies = []*serverconfigs.HTTPCachePolicy{
		{
			Id:       1,
			Type:     serverconfigs.CachePolicyStorageMemory,
			Options:  map[string]interface{}{},
			Capacity: &shared.SizeCapacity{Count: 1, Unit: shared.SizeCapacityUnitGB},
		},
	}
	caches.SharedManager.UpdatePolicies(policies)
	caches.SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{
		{
			Id:       1,
			Type:     serverconfigs.CachePolicyStorageMemory,
			Options:  map[string]interface{}{},
			Capacity: &shared.SizeCapacity{Count: 2, Unit: shared.SizeCapacityUnitGB},
		},
	})
}

// TestManager_ChangePolicy_File changes a file policy's capacity in place.
func TestManager_ChangePolicy_File(t *testing.T) {
	var policies = []*serverconfigs.HTTPCachePolicy{
		{
			Id:   1,
			Type: serverconfigs.CachePolicyStorageFile,
			Options: map[string]interface{}{
				"dir": Tea.Root + "/data/cache-index/p1",
			},
			Capacity: &shared.SizeCapacity{Count: 1, Unit: shared.SizeCapacityUnitGB},
		},
	}
	caches.SharedManager.UpdatePolicies(policies)
	caches.SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{
		{
			Id:   1,
			Type: serverconfigs.CachePolicyStorageFile,
			Options: map[string]interface{}{
				"dir": Tea.Root + "/data/cache-index/p1",
			},
			Capacity: &shared.SizeCapacity{Count: 2, Unit: shared.SizeCapacityUnitGB},
		},
	})
}

// TestManager_MaxSystemMemoryBytesPerStorage prints the per-storage budget
// for various storage counts.
func TestManager_MaxSystemMemoryBytesPerStorage(t *testing.T) {
	for i := 0; i < 100; i++ {
		caches.SharedManager.CountMemoryStorages = i
		t.Log(i, caches.SharedManager.MaxSystemMemoryBytesPerStorage()>>30, "GB")
	}
}

// printManager dumps the manager's current storages.
func printManager(t *testing.T) {
	t.Log("===manager==")
	t.Log("storage:")
	for _, storage := range caches.SharedManager.StorageMap() {
		t.Log("  storage:", storage.Policy().Id)
	}
	t.Log("===============")
}

View File

@@ -0,0 +1,48 @@
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !windows
package caches
import (
"fmt"
"golang.org/x/sys/unix"
)
// TotalDiskSize returns the used bytes of every distinct filesystem that
// hosts at least one file-based cache directory, deduplicated by
// filesystem id so shared partitions are only counted once.
func (this *Manager) TotalDiskSize() int64 {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var total = int64(0)
	var sidMap = map[string]bool{} // partition sid => bool
	for _, storage := range this.storageMap {
		// We can not simply sum storage.TotalDiskSize(): multiple cache
		// policies may point into the same disk partition.
		fileStorage, ok := storage.(*FileStorage)
		if ok {
			var options = fileStorage.options // copy
			if options != nil {
				var dir = options.Dir // copy
				if len(dir) == 0 {
					continue
				}
				var stat = &unix.Statfs_t{}
				err := unix.Statfs(dir, stat)
				if err != nil {
					continue
				}

				// deduplicate partitions by filesystem id
				var sid = fmt.Sprintf("%d_%d", stat.Fsid.Val[0], stat.Fsid.Val[1])
				if sidMap[sid] {
					continue
				}
				sidMap[sid] = true

				total += int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize) // we add extra int64() for darwin
			}
		}
	}

	if total < 0 {
		total = 0
	}

	return total
}

View File

@@ -0,0 +1,36 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"io"
"os"
)
// OpenFile wraps an already-opened cache file together with its cached
// meta bytes, header bytes, a version stamp and the file size, so the
// handle can be pooled and reused without re-reading metadata from disk.
type OpenFile struct {
	fp      *os.File // underlying file handle
	meta    []byte   // raw meta bytes read from the file
	header  []byte   // raw header bytes read from the file
	version int64    // version stamp checked by OpenFilePool
	size    int64    // file size in bytes
}

// NewOpenFile builds an OpenFile from an opened handle and its previously
// read meta/header bytes.
func NewOpenFile(fp *os.File, meta []byte, header []byte, version int64, size int64) *OpenFile {
	var file = &OpenFile{}
	file.fp = fp
	file.meta = meta
	file.header = header
	file.version = version
	file.size = size
	return file
}

// SeekStart rewinds the underlying file to offset 0 so it can be read again.
func (f *OpenFile) SeekStart() error {
	var _, err = f.fp.Seek(0, io.SeekStart)
	return err
}

// Close releases the meta bytes and closes the underlying file handle.
func (f *OpenFile) Close() error {
	f.meta = nil
	return f.fp.Close()
}

View File

@@ -0,0 +1,210 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/fsnotify/fsnotify"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/types"
"path/filepath"
"runtime"
"sync"
"time"
)
const (
	// maxOpenFileSize is the largest file (256 MiB) eligible for pooling.
	maxOpenFileSize = 256 << 20
)

// OpenFileCache keeps recently used cache files open, grouped per path in
// OpenFilePools, so hot files can be re-read without reopening them. An
// fsnotify watcher evicts a pool when its file changes on disk.
type OpenFileCache struct {
	poolMap  map[string]*OpenFilePool        // file path => Pool
	poolList *linkedlist.List[*OpenFilePool] // head is consumed first when over capacity
	watcher  *fsnotify.Watcher               // invalidates pools on file events
	locker   sync.RWMutex

	maxCount     int   // maximum number of pooled open files
	capacitySize int64 // maximum total bytes of pooled files

	count    int   // current number of pooled files
	usedSize int64 // current total bytes of pooled files
}

// NewOpenFileCache creates a cache limited to maxCount files (16384 when
// maxCount <= 0) and to 1/16 of system memory in bytes, and starts the
// watcher goroutine that closes pools on file-change events.
func NewOpenFileCache(maxCount int) (*OpenFileCache, error) {
	if maxCount <= 0 {
		maxCount = 16384
	}

	var cache = &OpenFileCache{
		maxCount:     maxCount,
		poolMap:      map[string]*OpenFilePool{},
		poolList:     linkedlist.NewList[*OpenFilePool](),
		capacitySize: (int64(memutils.SystemMemoryGB()) << 30) / 16,
	}

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	cache.watcher = watcher
	goman.New(func() {
		for event := range watcher.Events {
			// on Linux every event invalidates; elsewhere pure Chmod events are ignored
			if runtime.GOOS == "linux" || event.Op&fsnotify.Chmod != fsnotify.Chmod {
				cache.Close(event.Name)
			}
		}
	})

	return cache, nil
}
// Get borrows a pooled open file for filename, or returns nil when no pool
// exists or the pool is empty. Counters are adjusted when the pool
// actually yielded (consumed) a file.
func (this *OpenFileCache) Get(filename string) *OpenFile {
	filename = filepath.Clean(filename)

	this.locker.RLock()
	pool, ok := this.poolMap[filename]
	this.locker.RUnlock()
	if ok {
		file, consumed, consumedSize := pool.Get()
		if consumed {
			this.locker.Lock()
			this.count--
			this.usedSize -= consumedSize

			// an emptied pool is kept so Put does not have to recreate it
			this.locker.Unlock()
		}
		return file
	}
	return nil
}

// Put returns an open file to the cache under filename. Oversized files
// (> maxOpenFileSize) are rejected; when the cache is full, the head of
// the list is consumed instead of storing the new file.
//
// NOTE(review): on both rejection paths the incoming file is NOT closed
// here — presumably the caller keeps ownership and closes it; confirm.
// NOTE(review): the pool is pushed to the list tail even when pool.Put
// returned false — verify this reordering is intended.
func (this *OpenFileCache) Put(filename string, file *OpenFile) {
	filename = filepath.Clean(filename)

	if file.size > maxOpenFileSize {
		return
	}

	this.locker.Lock()
	defer this.locker.Unlock()

	// over capacity: free the oldest entry instead of storing the new one
	if this.count >= this.maxCount || this.usedSize+file.size >= this.capacitySize {
		this.consumeHead()
		return
	}

	pool, ok := this.poolMap[filename]
	var success bool
	if ok {
		success = pool.Put(file)
	} else {
		_ = this.watcher.Add(filename)

		pool = NewOpenFilePool(filename)
		pool.version = file.version
		this.poolMap[filename] = pool
		success = pool.Put(file)
	}
	this.poolList.Push(pool.linkItem)

	// update counters
	if success {
		this.count++
		this.usedSize += file.size
	}
}
// Close evicts and closes the pool for filename, e.g. after the file
// changed on disk. The pooled handles are closed outside the lock.
func (this *OpenFileCache) Close(filename string) {
	filename = filepath.Clean(filename)

	this.locker.Lock()

	pool, ok := this.poolMap[filename]
	if ok {
		// mark as closing so concurrent Get/Put bail out
		pool.SetClosing()

		delete(this.poolMap, filename)
		this.poolList.Remove(pool.linkItem)
		_ = this.watcher.Remove(filename)

		this.count -= pool.Len()
		this.usedSize -= pool.usedSize
	}

	this.locker.Unlock()

	// close outside the lock for better throughput
	if ok {
		pool.Close()
	}
}

// CloseAll closes every pool and the watcher; the cache cannot watch new
// files afterwards.
func (this *OpenFileCache) CloseAll() {
	this.locker.Lock()
	for _, pool := range this.poolMap {
		pool.Close()
	}
	this.poolMap = map[string]*OpenFilePool{}
	this.poolList.Reset()
	_ = this.watcher.Close()
	this.count = 0
	this.usedSize = 0
	this.locker.Unlock()
}

// SetCapacity overrides the total byte capacity.
// NOTE(review): written without holding the locker, so it races with Put;
// presumably only called during configuration — confirm.
func (this *OpenFileCache) SetCapacity(capacityBytes int64) {
	this.capacitySize = capacityBytes
}
// Debug starts a goroutine that dumps pool statistics every 5 seconds.
func (this *OpenFileCache) Debug() {
	var ticker = time.NewTicker(5 * time.Second)
	goman.New(func() {
		for range ticker.C {
			logs.Println("==== " + types.String(this.count) + ", " + fmt.Sprintf("%.4fMB", float64(this.usedSize)/(1<<20)) + " ====")
			this.poolList.Range(func(item *linkedlist.Item[*OpenFilePool]) (goNext bool) {
				logs.Println(filepath.Base(item.Value.Filename()), item.Value.Len())
				return true
			})
		}
	})
}

// consumeHead closes one pooled file from the head pool (two when more
// than 100 files are pooled) and drops the pool entirely once it is empty.
// Callers must hold the write lock.
func (this *OpenFileCache) consumeHead() {
	var delta = 1
	if this.count > 100 {
		delta = 2
	}
	for i := 0; i < delta; i++ {
		var head = this.poolList.Head()
		if head == nil {
			break
		}

		var headPool = head.Value
		headFile, consumed, consumedSize := headPool.Get()
		if consumed {
			this.count--
			this.usedSize -= consumedSize

			if headFile != nil {
				_ = headFile.Close()
			}
		}
		if headPool.Len() == 0 {
			delete(this.poolMap, headPool.filename)
			this.poolList.Remove(head)
			_ = this.watcher.Remove(headPool.filename)
		}
	}
}

View File

@@ -0,0 +1,69 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/types"
"testing"
"time"
)
// TestNewOpenFileCache_Close exercises Put/Get and closing a single path.
func TestNewOpenFileCache_Close(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.Debug()
	cache.Put("a.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("c.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Get("b.txt")
	cache.Get("d.txt") // not exist
	cache.Close("a.txt")

	if testutils.IsSingleTesting() {
		time.Sleep(100 * time.Second)
	}
}

// TestNewOpenFileCache_OverSize pushes past the byte capacity so the head
// of the list is consumed.
func TestNewOpenFileCache_OverSize(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.SetCapacity(1 << 30)
	cache.Debug()
	for i := 0; i < 100; i++ {
		cache.Put("a"+types.String(i)+".txt", caches.NewOpenFile(nil, nil, nil, 0, 128<<20))
	}

	if testutils.IsSingleTesting() {
		time.Sleep(100 * time.Second)
	}
}

// TestNewOpenFileCache_CloseAll verifies closing every pool at once.
func TestNewOpenFileCache_CloseAll(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.Debug()
	cache.Put("a.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Put("c.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Get("b.txt")
	cache.Get("d.txt")
	cache.CloseAll()

	if testutils.IsSingleTesting() {
		time.Sleep(6 * time.Second)
	}
}

View File

@@ -0,0 +1,106 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
)
// OpenFilePool holds reusable open handles for one file, backed by a
// buffered channel (capacity 1024).
type OpenFilePool struct {
	c        chan *OpenFile                  // pooled open files
	linkItem *linkedlist.Item[*OpenFilePool] // position in OpenFileCache's list
	filename string
	version  int64 // files with a different version stamp are rejected by Put
	isClosed bool  // set by SetClosing/Close; pool stops serving/accepting files
	usedSize int64 // total bytes of files currently pooled
}

// NewOpenFilePool creates a pool for filename, stamped with the current
// unix millisecond as its version.
func NewOpenFilePool(filename string) *OpenFilePool {
	var pool = &OpenFilePool{
		filename: filename,
		c:        make(chan *OpenFile, 1024),
		version:  fasttime.Now().UnixMilli(),
	}
	pool.linkItem = linkedlist.NewItem[*OpenFilePool](pool)
	return pool
}

// Filename returns the file path this pool serves.
func (this *OpenFilePool) Filename() string {
	return this.filename
}
// Get pops one pooled file, rewinds it to offset 0 and returns it.
// consumed reports whether a file was taken off the channel (true even
// when the rewind failed and nil is returned); consumedSize is its size.
//
// NOTE(review): isClosed and usedSize are accessed without
// synchronization; presumably tolerated alongside the channel semantics —
// confirm with -race.
func (this *OpenFilePool) Get() (resultFile *OpenFile, consumed bool, consumedSize int64) {
	// a closed pool serves nothing
	if this.isClosed {
		return nil, false, 0
	}

	select {
	case file := <-this.c:
		if file != nil {
			this.usedSize -= file.size

			err := file.SeekStart()
			if err != nil {
				_ = file.Close()
				return nil, true, file.size
			}
			file.version = this.version
			return file, true, file.size
		}
		return nil, false, 0
	default:
		return nil, false, 0
	}
}

// Put offers a file back to the pool. The file is closed (and false
// returned) when the pool is closed, the version stamp mismatches, or the
// channel is full.
func (this *OpenFilePool) Put(file *OpenFile) bool {
	// a closed pool accepts no new files
	if this.isClosed {
		_ = file.Close()
		return false
	}

	// reject files stamped for a different generation of this path
	if this.version > 0 && file.version > 0 && file.version != this.version {
		_ = file.Close()
		return false
	}

	// enqueue into the pool
	select {
	case this.c <- file:
		this.usedSize += file.size
		return true
	default:
		// pool full: close the surplus handle
		_ = file.Close()
		return false
	}
}
// Len returns the number of files currently pooled.
func (this *OpenFilePool) Len() int {
	return len(this.c)
}

// TotalSize returns the total bytes of pooled files.
func (this *OpenFilePool) TotalSize() int64 {
	return this.usedSize
}

// SetClosing marks the pool closed without draining it; subsequent Get/Put
// calls become no-ops.
func (this *OpenFilePool) SetClosing() {
	this.isClosed = true
}

// Close marks the pool closed and closes every pooled file.
func (this *OpenFilePool) Close() {
	this.isClosed = true

	for {
		select {
		case file := <-this.c:
			_ = file.Close()
		default:
			return
		}
	}
}

View File

@@ -0,0 +1,46 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/rands"
"sync"
"testing"
)
func TestOpenFilePool_Get(t *testing.T) {
var pool = caches.NewOpenFilePool("a")
t.Log(pool.Filename())
t.Log(pool.Get())
t.Log(pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1)))
t.Log(pool.Get())
t.Log(pool.Get())
}
func TestOpenFilePool_Close(t *testing.T) {
var pool = caches.NewOpenFilePool("a")
pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
pool.Close()
}
// TestOpenFilePool_Concurrent hammers Get/Put from many goroutines; it is
// intended to be run with -race to surface data races in the pool.
func TestOpenFilePool_Concurrent(t *testing.T) {
	var pool = caches.NewOpenFilePool("a")
	var concurrent = 1000
	var wg = &sync.WaitGroup{}
	wg.Add(concurrent)
	for i := 0; i < concurrent; i++ {
		go func() {
			defer wg.Done()

			// Randomly interleave producers and consumers.
			if rands.Int(0, 1) == 1 {
				pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
			}
			if rands.Int(0, 1) == 0 {
				pool.Get()
			}
		}()
	}
	wg.Wait()
}

View File

@@ -0,0 +1,270 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"bytes"
"encoding/json"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"strconv"
)
// PartialRanges records which byte ranges of a partially-cached resource
// have already been downloaded.
type PartialRanges struct {
	Version    int        `json:"version"`    // format version of the persisted file
	Ranges     [][2]int64 `json:"ranges"`     // sorted, non-overlapping [begin, end] pairs (inclusive)
	BodySize   int64      `json:"bodySize"`   // total body length in bytes
	ContentMD5 string     `json:"contentMD5"` // Content-MD5 of the full body
}
// NewPartialRanges creates an empty ranges object at the current format version.
// NOTE(review): the expiresAt parameter is currently unused — kept for
// signature compatibility with existing callers; confirm before removing.
func NewPartialRanges(expiresAt int64) *PartialRanges {
	return &PartialRanges{
		Ranges:  [][2]int64{},
		Version: 2,
	}
}
// NewPartialRangesFromData parses the line-based ranges format:
//
//	v:<version>
//	b:<bodySize>
//	m:<contentMD5>
//	r:<begin>,<end>
//
// Unknown or malformed lines are skipped. Fix: a final line without a
// trailing '\n' is now parsed instead of being silently dropped.
func NewPartialRangesFromData(data []byte) (*PartialRanges, error) {
	var rs = NewPartialRanges(0)
	for len(data) > 0 {
		var index = bytes.IndexRune(data, '\n')
		var line []byte
		if index < 0 {
			// last line lacks a newline terminator — still parse it
			line = data
			data = nil
		} else {
			line = data[:index]
			data = data[index+1:]
		}

		var colonIndex = bytes.IndexRune(line, ':')
		if colonIndex <= 0 {
			continue
		}
		switch string(line[:colonIndex]) {
		case "v": // format version
			rs.Version = types.Int(line[colonIndex+1:])
		case "b": // total body size
			rs.BodySize = types.Int64(line[colonIndex+1:])
		case "r": // one downloaded range: begin,end
			var commaIndex = bytes.IndexRune(line, ',')
			if commaIndex > 0 {
				rs.Ranges = append(rs.Ranges, [2]int64{types.Int64(line[colonIndex+1 : commaIndex]), types.Int64(line[commaIndex+1:])})
			}
		case "m": // Content-MD5
			rs.ContentMD5 = string(line[colonIndex+1:])
		}
	}
	return rs, nil
}
// NewPartialRangesFromJSON parses the legacy JSON on-disk format.
func NewPartialRangesFromJSON(data []byte) (*PartialRanges, error) {
	var rs = NewPartialRanges(0)
	err := json.Unmarshal(data, &rs)
	if err != nil {
		return nil, err
	}
	// Version 0 marks the object as loaded from the legacy format.
	rs.Version = 0
	return rs, nil
}
// NewPartialRangesFromFile loads range information for a cache file,
// consulting the in-memory write-behind queue before falling back to disk.
func NewPartialRangesFromFile(path string) (*PartialRanges, error) {
	data, err := SharedPartialRangesQueue.Get(path)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 {
		return NewPartialRanges(0), nil
	}

	// Legacy files were stored as JSON objects.
	if data[0] == '{' {
		return NewPartialRangesFromJSON(data)
	}

	// Current line-based format.
	return NewPartialRangesFromData(data)
}
// Add inserts the inclusive range [begin, end], keeping Ranges sorted by
// start position and merging it with any adjacent or overlapping neighbors.
func (this *PartialRanges) Add(begin int64, end int64) {
	// Normalize so begin <= end.
	if begin > end {
		begin, end = end, begin
	}

	var nr = [2]int64{begin, end}

	var count = len(this.Ranges)
	if count == 0 {
		this.Ranges = [][2]int64{nr}
		return
	}

	// Insert before the first existing range that starts after (or at) the
	// new range, preserving sort order.
	var index = -1
	for i, r := range this.Ranges {
		if r[0] > begin || (r[0] == begin && r[1] >= end) {
			index = i

			this.Ranges = append(this.Ranges, [2]int64{})
			copy(this.Ranges[index+1:], this.Ranges[index:])
			this.Ranges[index] = nr

			break
		}
	}
	if index == -1 {
		// Largest start so far: append at the end.
		index = count
		this.Ranges = append(this.Ranges, nr)
	}

	// Coalesce with neighbors around the insertion point.
	this.merge(index)
}
// Contains reports whether one recorded range fully covers [begin, end].
func (this *PartialRanges) Contains(begin int64, end int64) bool {
	for _, existing := range this.Ranges {
		if existing[0] <= begin && existing[1] >= end {
			return true
		}
	}
	return false
}
// Nearest finds a recorded range that covers position begin and returns the
// usable sub-range [begin, min(end, range end)]; ok is false when no range
// covers begin.
func (this *PartialRanges) Nearest(begin int64, end int64) (r [2]int64, ok bool) {
	for _, candidate := range this.Ranges {
		if candidate[0] <= begin && candidate[1] > begin {
			return [2]int64{begin, this.min(end, candidate[1])}, true
		}
	}
	return
}
// FindRangeAtPosition returns the remaining span [position, range end] of the
// recorded range covering position, if any.
func (this *PartialRanges) FindRangeAtPosition(position int64) (r rangeutils.Range, ok bool) {
	if position < 0 {
		return
	}
	for _, candidate := range this.Ranges {
		if candidate[0] <= position && candidate[1] > position {
			r = [2]int64{position, candidate[1]}
			ok = true
			return
		}
	}
	return
}
// String serializes the ranges to the line-based on-disk format
// (see NewPartialRangesFromData). Every line, including the last, is
// newline-terminated. Uses a bytes.Buffer instead of repeated string
// concatenation to avoid O(n²) copying when many ranges are present.
func (this *PartialRanges) String() string {
	var buf bytes.Buffer
	buf.WriteString("v:")
	buf.WriteString(strconv.Itoa(this.Version)) // version
	buf.WriteString("\nb:")
	buf.WriteString(this.formatInt64(this.BodySize)) // bodySize
	buf.WriteByte('\n')
	if len(this.ContentMD5) > 0 {
		buf.WriteString("m:") // Content-MD5
		buf.WriteString(this.ContentMD5)
		buf.WriteByte('\n')
	}
	for _, r := range this.Ranges {
		buf.WriteString("r:") // one range per line
		buf.WriteString(this.formatInt64(r[0]))
		buf.WriteByte(',')
		buf.WriteString(this.formatInt64(r[1]))
		buf.WriteByte('\n')
	}
	return buf.String()
}
// Bytes returns the serialized form as a byte slice.
func (this *PartialRanges) Bytes() []byte {
	return []byte(this.String())
}

// WriteToFile schedules the ranges to be persisted to path via the shared
// write-behind queue; the actual disk write happens asynchronously, so the
// returned error is always nil.
func (this *PartialRanges) WriteToFile(path string) error {
	SharedPartialRangesQueue.Put(path, this.Bytes())
	return nil
}
// Max returns the end position of the last recorded range, or 0 when empty.
func (this *PartialRanges) Max() int64 {
	var count = len(this.Ranges)
	if count == 0 {
		return 0
	}
	return this.Ranges[count-1][1]
}
// Reset discards all recorded ranges (BodySize and ContentMD5 are kept).
func (this *PartialRanges) Reset() {
	this.Ranges = [][2]int64{}
}

// IsCompleted reports whether the single remaining range covers the whole
// body [0, BodySize-1], i.e. the download has finished.
// NOTE(review): assumes BodySize has been set (> 0) — confirm callers.
func (this *PartialRanges) IsCompleted() bool {
	return len(this.Ranges) == 1 && this.Ranges[0][0] == 0 && this.Ranges[0][1] == this.BodySize-1
}
// merge coalesces the range at index with its neighbors in both directions.
// Two inclusive ranges are merged when they overlap or are directly adjacent;
// the width-based condition w1+w2 >= span-1 expresses exactly that for
// inclusive [begin, end] pairs (w being end-begin).
func (this *PartialRanges) merge(index int) {
	// Merge backwards (towards smaller start positions).
	var lastIndex = index
	for i := index; i >= 1; i-- {
		var curr = this.Ranges[i]
		var prev = this.Ranges[i-1]
		var w1 = this.w(curr)
		var w2 = this.w(prev)
		if w1+w2 >= this.max(curr[1], prev[1])-this.min(curr[0], prev[0])-1 {
			// Overlapping/adjacent: fold curr into prev and drop curr.
			prev = [2]int64{this.min(curr[0], prev[0]), this.max(curr[1], prev[1])}
			this.Ranges[i-1] = prev
			this.Ranges = append(this.Ranges[:i], this.Ranges[i+1:]...)
			lastIndex = i - 1
		} else {
			break
		}
	}

	// Merge forwards (towards larger start positions) from where we ended up.
	index = lastIndex
	for index < len(this.Ranges)-1 {
		var curr = this.Ranges[index]
		var next = this.Ranges[index+1]
		var w1 = this.w(curr)
		var w2 = this.w(next)
		if w1+w2 >= this.max(curr[1], next[1])-this.min(curr[0], next[0])-1 {
			// Overlapping/adjacent: fold next into curr and drop next.
			curr = [2]int64{this.min(curr[0], next[0]), this.max(curr[1], next[1])}
			this.Ranges = append(this.Ranges[:index], this.Ranges[index+1:]...)
			this.Ranges[index] = curr
		} else {
			break
		}
	}
}
// w returns the width of an inclusive range as end-begin (one less than the
// number of bytes covered; merge's -1 compensates for this).
func (this *PartialRanges) w(r [2]int64) int64 {
	return r[1] - r[0]
}

// min returns the smaller of two int64 values.
func (this *PartialRanges) min(n1 int64, n2 int64) int64 {
	if n1 <= n2 {
		return n1
	}
	return n2
}

// max returns the larger of two int64 values.
func (this *PartialRanges) max(n1 int64, n2 int64) int64 {
	if n1 >= n2 {
		return n1
	}
	return n2
}

// formatInt64 renders an int64 in base 10.
func (this *PartialRanges) formatInt64(i int64) string {
	return strconv.FormatInt(i, 10)
}

View File

@@ -0,0 +1,144 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"sync"
)
// SharedPartialRangesQueue is the global write-behind queue for ranges files.
var SharedPartialRangesQueue = NewPartialRangesQueue()

func init() {
	// Only the main process runs the background flush loop.
	if !teaconst.IsMain {
		return
	}

	SharedPartialRangesQueue.Start()
}
// partialRangesQueueSharding is the number of map/lock shards used to reduce
// contention between writers.
const partialRangesQueueSharding = 8

// PartialRangesQueue buffers ranges-file contents in memory and flushes them
// to disk from a background goroutine (see Dump).
type PartialRangesQueue struct {
	m  [partialRangesQueueSharding]map[string][]byte // per-shard { filename => pending data, ... }
	c  chan string                                   // filenames waiting to be flushed
	mu [partialRangesQueueSharding]*sync.RWMutex     // one lock per shard
}
// NewPartialRangesQueue creates a queue whose pending-flush channel is sized
// according to the machine's total memory.
func NewPartialRangesQueue() *PartialRangesQueue {
	var memGB = memutils.SystemMemoryGB()
	var queueSize int
	switch {
	case memGB > 16:
		queueSize = 8 << 10
	case memGB > 8:
		queueSize = 4 << 10
	case memGB > 4:
		queueSize = 2 << 10
	case memGB > 2:
		queueSize = 1 << 10
	default:
		queueSize = 512
	}

	// Initialize one map and one lock per shard.
	var shards = [partialRangesQueueSharding]map[string][]byte{}
	var locks = [partialRangesQueueSharding]*sync.RWMutex{}
	for i := 0; i < partialRangesQueueSharding; i++ {
		shards[i] = map[string][]byte{}
		locks[i] = &sync.RWMutex{}
	}

	return &PartialRangesQueue{
		mu: locks,
		m:  shards,
		c:  make(chan string, queueSize),
	}
}
// Start launches the background goroutine that flushes queued data to disk.
func (this *PartialRangesQueue) Start() {
	goman.New(func() {
		this.Dump()
	})
}
// Put stores data for filename in memory and enqueues it for flushing.
// Repeated Puts for the same filename overwrite the pending data, so only the
// latest version is written to disk.
func (this *PartialRangesQueue) Put(filename string, data []byte) {
	var index = this.indexForKey(filename)

	this.mu[index].Lock()
	this.m[index][filename] = data
	this.mu[index].Unlock()

	// Blocking send: when the channel is full this applies backpressure to
	// writers instead of dropping updates.
	this.c <- filename
}
// Get returns the pending in-memory data for filename, falling back to
// reading the file from disk when nothing is queued.
func (this *PartialRangesQueue) Get(filename string) ([]byte, error) {
	var shard = this.indexForKey(filename)

	this.mu[shard].RLock()
	data, found := this.m[shard][filename]
	this.mu[shard].RUnlock()

	if !found {
		// Nothing buffered: read whatever is on disk.
		return fsutils.ReadFile(filename)
	}
	return data, nil
}
// Delete drops any pending in-memory data for filename.
func (this *PartialRangesQueue) Delete(filename string) {
	var shard = this.indexForKey(filename)
	var lock = this.mu[shard]

	lock.Lock()
	delete(this.m[shard], filename)
	lock.Unlock()
}
// Dump drains the channel forever, writing each queued entry to disk.
// The entry is removed from the map before writing, so a concurrent Put for
// the same filename re-queues fresh data rather than being lost.
func (this *PartialRangesQueue) Dump() {
	for filename := range this.c {
		var index = this.indexForKey(filename)
		this.mu[index].Lock()
		data, ok := this.m[index][filename]
		if ok {
			delete(this.m[index], filename)
		}
		this.mu[index].Unlock()

		// Already flushed by an earlier queue entry, or deleted meanwhile.
		if !ok || len(data) == 0 {
			continue
		}

		err := fsutils.WriteFile(filename, data, 0666)
		if err != nil {
			// Best-effort: log and keep draining.
			remotelogs.Println("PARTIAL_RANGES_QUEUE", "write file '"+filename+"' failed: "+err.Error())
		}
	}
}
// Len counts the entries currently buffered across all shards.
func (this *PartialRangesQueue) Len() int {
	var count int
	for i := 0; i < partialRangesQueueSharding; i++ {
		this.mu[i].RLock()
		count += len(this.m[i])
		this.mu[i].RUnlock()
	}
	return count
}

// indexForKey maps a filename to its shard using an FNV hash.
func (this *PartialRangesQueue) indexForKey(filename string) int {
	return int(fnv.HashString(filename) % partialRangesQueueSharding)
}

View File

@@ -0,0 +1,31 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestNewPartialRangesQueue exercises Put/Get/Delete bookkeeping: repeated
// Puts for one key keep Len at 1, distinct keys increase it, Delete removes.
func TestNewPartialRangesQueue(t *testing.T) {
	var a = assert.NewAssertion(t)

	var queue = caches.NewPartialRangesQueue()
	queue.Put("a", []byte{1, 2, 3})
	t.Log("add 'a':", queue.Len())
	t.Log(queue.Get("a"))
	a.IsTrue(queue.Len() == 1)

	queue.Put("a", nil)
	t.Log("add 'a':", queue.Len())
	a.IsTrue(queue.Len() == 1)

	queue.Put("b", nil)
	t.Log("add 'b':", queue.Len())
	a.IsTrue(queue.Len() == 2)

	queue.Delete("a")
	t.Log("delete 'a':", queue.Len())
	a.IsTrue(queue.Len() == 1)
}

View File

@@ -0,0 +1,239 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"github.com/iwind/TeaGo/logs"
"testing"
"time"
)
// TestNewPartialRanges adds overlapping ranges in mixed order and prints the
// merged result.
func TestNewPartialRanges(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(50, 300)
	r.Add(30, 80)
	r.Add(30, 100)
	r.Add(30, 400)
	r.Add(1000, 10000)
	r.Add(200, 1000)
	r.Add(200, 10040)
	logs.PrintAsJSON(r.Ranges, t)
	t.Log("max:", r.Max())
}

// TestNewPartialRanges1 asserts fully-overlapping adds collapse to one range.
func TestNewPartialRanges1(t *testing.T) {
	var a = assert.NewAssertion(t)

	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(1, 101)
	r.Add(1, 102)
	r.Add(2, 103)
	r.Add(200, 300)
	r.Add(1, 1000)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
	a.IsTrue(len(rs) == 1)
	if len(rs) == 1 {
		a.IsTrue(rs[0][0] == 1)
		a.IsTrue(rs[0][1] == 1000)
	}
}

// TestNewPartialRanges2 adds ranges in ascending order.
func TestNewPartialRanges2(t *testing.T) {
	// low -> high
	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(1, 101)
	r.Add(1, 102)
	r.Add(2, 103)
	r.Add(200, 300)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(250, 400)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
}

// TestNewPartialRanges3 adds ranges in descending order.
func TestNewPartialRanges3(t *testing.T) {
	// high -> low
	var r = caches.NewPartialRanges(0)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(200, 300)
	r.Add(250, 400)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
}

// TestNewPartialRanges4 checks merging of directly adjacent ranges.
func TestNewPartialRanges4(t *testing.T) {
	// nearby
	var r = caches.NewPartialRanges(0)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(305, 306)
	r.Add(417, 417)
	r.Add(410, 415)
	r.Add(400, 409)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
	t.Log(r.Contains(400, 416))
}

// TestNewPartialRanges5 adds many overlapping sliding windows.
func TestNewPartialRanges5(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	for j := 0; j < 1000; j++ {
		r.Add(int64(j), int64(j+100))
	}
	logs.PrintAsJSON(r.Ranges, t)
}
// TestNewPartialRanges_Nearest probes Nearest with covered, uncovered and
// partially-covered requests.
func TestNewPartialRanges_Nearest(t *testing.T) {
	{
		// nearby
		var r = caches.NewPartialRanges(0)
		r.Add(301, 400)
		r.Add(401, 500)
		r.Add(501, 600)
		t.Log(r.Nearest(100, 200))
		t.Log(r.Nearest(300, 350))
		t.Log(r.Nearest(302, 350))
	}

	{
		// nearby
		var r = caches.NewPartialRanges(0)
		r.Add(301, 400)
		r.Add(450, 500)
		r.Add(550, 600)
		t.Log(r.Nearest(100, 200))
		t.Log(r.Nearest(300, 350))
		t.Log(r.Nearest(302, 350))
		t.Log(r.Nearest(302, 440))
		t.Log(r.Nearest(302, 1000))
	}
}

// TestNewPartialRanges_Large_Range round-trips a multi-terabyte range
// through the string format to confirm int64 positions survive.
func TestNewPartialRanges_Large_Range(t *testing.T) {
	var a = assert.NewAssertion(t)

	var largeSize int64 = 10000000000000
	t.Log(largeSize/1024/1024/1024, "G")
	var r = caches.NewPartialRanges(0)
	r.Add(1, largeSize)
	var s = r.String()
	t.Log(s)

	r2, err := caches.NewPartialRangesFromData([]byte(s))
	if err != nil {
		t.Fatal(err)
	}
	a.IsTrue(largeSize == r2.Ranges[0][1])
	logs.PrintAsJSON(r, t)
}
// TestPartialRanges_Encode_JSON measures the size/latency of JSON encoding.
func TestPartialRanges_Encode_JSON(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}
	var before = time.Now()
	data, err := json.Marshal(r)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log(len(data))
}

// TestPartialRanges_Encode_String round-trips the line-based string format,
// including the Content-MD5 field.
func TestPartialRanges_Encode_String(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	r.BodySize = 1024
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}
	var sum = md5.Sum([]byte("123456"))
	r.ContentMD5 = base64.StdEncoding.EncodeToString(sum[:])
	var before = time.Now()
	var data = r.String()
	t.Log(data)
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log(len(data))

	r2, err := caches.NewPartialRangesFromData([]byte(data))
	if err != nil {
		t.Fatal(err)
	}
	logs.PrintAsJSON(r2, t)
}

// TestPartialRanges_Version checks version parsing for data with and without
// a "v:" line, and for the legacy JSON format.
func TestPartialRanges_Version(t *testing.T) {
	{
		ranges, err := caches.NewPartialRangesFromData([]byte(`e:1668928495
r:0,1048576
r:1140260864,1140295164`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}

	{
		ranges, err := caches.NewPartialRangesFromData([]byte(`e:1668928495
r:0,1048576
r:1140260864,1140295164
v:0
`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}

	{
		ranges, err := caches.NewPartialRangesFromJSON([]byte(`{}`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}
}
// BenchmarkNewPartialRanges benchmarks Add on many overlapping windows.
func BenchmarkNewPartialRanges(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var r = caches.NewPartialRanges(0)
		for j := 0; j < 1000; j++ {
			r.Add(int64(j), int64(j+100))
		}
	}
}

// BenchmarkPartialRanges_String benchmarks serialization to the string form.
func BenchmarkPartialRanges_String(b *testing.B) {
	var r = caches.NewPartialRanges(0)
	r.BodySize = 1024
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_ = r.String()
	}
}

View File

@@ -0,0 +1,52 @@
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"io"
)
// ReaderFunc is the per-chunk callback used by the Read* methods: n is the
// number of valid bytes in the shared buffer; returning goNext == false stops
// the read loop, and a non-nil err aborts it.
type ReaderFunc func(n int) (goNext bool, err error)

// Reader is the common interface of all cache-entry readers.
type Reader interface {
	// Init initializes the reader (parses the entry's metadata).
	Init() error

	// TypeName returns the storage type name (e.g. "disk", "memory").
	TypeName() string

	// ExpiresAt returns the entry's expiration timestamp.
	ExpiresAt() int64

	// Status returns the cached HTTP status code.
	Status() int

	// LastModified returns the entry's last modification timestamp.
	LastModified() int64

	// ReadHeader streams the cached header through callback.
	ReadHeader(buf []byte, callback ReaderFunc) error

	// ReadBody streams the cached body through callback.
	ReadBody(buf []byte, callback ReaderFunc) error

	// Read implements the io.Reader interface over the body.
	Read(buf []byte) (int, error)

	// ReadBodyRange streams a body range [start, end] through callback.
	ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error

	// HeaderSize returns the cached header size in bytes.
	HeaderSize() int64

	// BodySize returns the cached body size in bytes.
	BodySize() int64

	// ContainsRange reports whether the reader can serve the given range.
	ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool)

	// SetNextReader chains a follow-up content reader.
	SetNextReader(nextReader io.ReadCloser)

	// Close releases the reader's resources.
	Close() error
}

View File

@@ -0,0 +1,14 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import "io"
// BaseReader provides the reader-chaining support shared by all readers.
type BaseReader struct {
	nextReader io.ReadCloser // optional follow-up content source
}

// SetNextReader chains a follow-up content reader that is consumed after the
// primary content is exhausted.
func (this *BaseReader) SetNextReader(nextReader io.ReadCloser) {
	this.nextReader = nextReader
}

View File

@@ -0,0 +1,428 @@
package caches
import (
"encoding/binary"
"errors"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
"os"
)
// FileReader reads a cache entry (meta + header + body) from a disk file.
type FileReader struct {
	BaseReader

	fp            *fsutils.File  // underlying file handle
	openFile      *OpenFile      // pooled open file (may carry cached meta/header)
	openFileCache *OpenFileCache // optional cache to return the handle to on Close

	meta   []byte // raw metadata block
	header []byte // cached raw header (small headers only)

	expiresAt    int64 // entry expiration timestamp (from meta)
	status       int   // cached HTTP status code (from meta)
	headerOffset int64 // file offset where the header starts
	headerSize   int   // header length in bytes
	bodySize     int64 // body length in bytes
	bodyOffset   int64 // file offset where the body starts

	isClosed bool // guards against double Close
}

// NewFileReader creates a reader over an already-open cache file.
func NewFileReader(fp *fsutils.File) *FileReader {
	return &FileReader{fp: fp}
}
// Init parses the entry's metadata, discarding the file on failure.
func (this *FileReader) Init() error {
	return this.InitAutoDiscard(true)
}

// InitAutoDiscard parses the meta block (expiry, status, header/body sizes
// and offsets). When autoDiscard is true, any parse failure closes and
// removes the underlying file.
func (this *FileReader) InitAutoDiscard(autoDiscard bool) error {
	// Reuse meta/header carried by a pooled OpenFile, if present.
	if this.openFile != nil {
		this.meta = this.openFile.meta
		this.header = this.openFile.header
	}

	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}

	var buf = this.meta
	if len(buf) == 0 {
		buf = make([]byte, SizeMeta)
		ok, err := this.readToBuff(this.fp, buf)
		if err != nil {
			return err
		}
		if !ok {
			// Short read: the file is too small to hold a meta block.
			return ErrNotFound
		}
		this.meta = buf
	}

	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))

	var status = types.Int(string(buf[OffsetStatus : OffsetStatus+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status

	// URL
	var urlLength = binary.BigEndian.Uint32(buf[OffsetURLLength : OffsetURLLength+SizeURLLength])

	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[OffsetHeaderLength : OffsetHeaderLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): returns nil without setting isOk, so with autoDiscard
		// the deferred discard removes the file even though no error is
		// reported — confirm this is intended.
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)

	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)

	// NOTE(review): converting Uint64 through int truncates on 32-bit
	// platforms for bodies >= 2GB — confirm target platforms are 64-bit.
	var bodySize = int(binary.BigEndian.Uint64(buf[OffsetBodyLength : OffsetBodyLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)

	// Cache small headers in memory when an OpenFileCache is attached.
	if this.openFileCache != nil && len(this.header) == 0 {
		if headerSize > 0 && headerSize <= 512 {
			this.header = make([]byte, headerSize)
			_, err := this.fp.Seek(this.headerOffset, io.SeekStart)
			if err != nil {
				return err
			}
			_, err = this.readToBuff(this.fp, this.header)
			if err != nil {
				return err
			}
		}
	}

	isOk = true
	return nil
}
// TypeName returns the storage type for this reader.
func (this *FileReader) TypeName() string {
	return "disk"
}

// ExpiresAt returns the entry's expiration timestamp parsed from meta.
func (this *FileReader) ExpiresAt() int64 {
	return this.expiresAt
}

// Status returns the cached HTTP status code parsed from meta.
func (this *FileReader) Status() int {
	return this.status
}

// LastModified returns the file's modification time, or 0 if stat fails.
func (this *FileReader) LastModified() int64 {
	stat, err := this.fp.Stat()
	if err != nil {
		return 0
	}
	return stat.ModTime().Unix()
}

// HeaderSize returns the cached header length in bytes.
func (this *FileReader) HeaderSize() int64 {
	return int64(this.headerSize)
}

// BodySize returns the cached body length in bytes.
func (this *FileReader) BodySize() int64 {
	return this.bodySize
}
// ReadHeader streams the cached header through callback in buf-sized chunks,
// then positions the file at the start of the body. On I/O failure the
// underlying cache file is discarded.
func (this *FileReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	// Fast path: serve the in-memory header copy when it fits in buf.
	if len(this.header) > 0 && len(buf) >= len(this.header) {
		copy(buf, this.header)
		_, err := callback(len(this.header))
		if err != nil {
			return err
		}

		// Position the file at the body for subsequent reads.
		_, err = this.fp.Seek(this.bodyOffset, io.SeekStart)
		if err != nil {
			return err
		}

		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	_, err := this.fp.Seek(this.headerOffset, io.SeekStart)
	if err != nil {
		return err
	}

	var headerSize = this.headerSize

	for {
		n, err := this.fp.Read(buf)
		if n > 0 {
			if n < headerSize {
				// Full chunk of header bytes; keep tracking what remains.
				goNext, e := callback(n)
				if e != nil {
					// Callback errors are the caller's problem, not a
					// corrupted file — do not discard.
					isOk = true
					return e
				}
				if !goNext {
					break
				}
				headerSize -= n
			} else {
				// The chunk crosses the header/body boundary: report only
				// the remaining header bytes and stop.
				_, e := callback(headerSize)
				if e != nil {
					isOk = true
					return e
				}
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	// Position the file at the body for subsequent reads.
	_, err = this.fp.Seek(this.bodyOffset, io.SeekStart)
	if err != nil {
		return err
	}

	return nil
}
// ReadBody streams the whole cached body through callback in buf-sized
// chunks. On I/O failure the underlying cache file is discarded.
func (this *FileReader) ReadBody(buf []byte, callback ReaderFunc) error {
	if this.bodySize == 0 {
		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	var offset = this.bodyOffset

	// Seek to the body start before streaming.
	_, err := this.fp.Seek(offset, io.SeekStart)
	if err != nil {
		return err
	}

	for {
		n, err := this.fp.Read(buf)
		if n > 0 {
			goNext, e := callback(n)
			if e != nil {
				// Callback errors do not indicate file corruption.
				isOk = true
				return e
			}
			if !goNext {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	return nil
}
// Read implements io.Reader over the body; an empty body reads as EOF.
// A non-EOF read error causes the cache file to be discarded.
func (this *FileReader) Read(buf []byte) (n int, err error) {
	if this.bodySize == 0 {
		return 0, io.EOF
	}
	n, err = this.fp.Read(buf)
	if err != nil && err != io.EOF {
		// Mid-stream failure: treat the cache entry as corrupted.
		_ = this.discard()
	}
	return n, err
}
// ReadBodyRange streams the body bytes in [start, end] through callback.
// Negative start means a suffix range of |end| bytes (HTTP "-N"); negative
// end means from start to the end of the body (HTTP "N-"). After the range,
// any chained nextReader is drained through the same callback.
func (this *FileReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	// Translate the requested range into absolute file offsets.
	var offset = start
	if start < 0 {
		// Suffix range: last |end| bytes (end is negative here).
		offset = this.bodyOffset + this.bodySize + end
		end = this.bodyOffset + this.bodySize - 1
	} else if end < 0 {
		// Open-ended range: from start to the end of the body.
		offset = this.bodyOffset + start
		end = this.bodyOffset + this.bodySize - 1
	} else {
		offset = this.bodyOffset + start
		end = this.bodyOffset + end
	}
	if offset < 0 || end < 0 || offset > end {
		// Invalid ranges are a caller error, not file corruption.
		isOk = true
		return ErrInvalidRange
	}
	_, err := this.fp.Seek(offset, io.SeekStart)
	if err != nil {
		return err
	}

	for {
		var n int
		n, err = this.fp.Read(buf)
		if n > 0 {
			// Remaining bytes (inclusive) in the requested range.
			var n2 = int(end-offset) + 1
			if n2 <= n {
				// This chunk completes the range: deliver only what remains.
				_, e := callback(n2)
				if e != nil {
					isOk = true
					return e
				}
				break
			} else {
				goNext, e := callback(n)
				if e != nil {
					isOk = true
					return e
				}
				if !goNext {
					break
				}
			}

			offset += int64(n)
			if offset > end {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	// Drain the chained reader, if any, through the same callback.
	if this.nextReader != nil {
		defer func() {
			_ = this.nextReader.Close()
		}()

		for {
			var n int
			n, err = this.nextReader.Read(buf)
			if n > 0 {
				goNext, writeErr := callback(n)
				if writeErr != nil {
					return writeErr
				}
				if !goNext {
					break
				}
			}
			if err != nil {
				if err != io.EOF {
					return err
				}
				break
			}
		}
	}

	return nil
}
// ContainsRange reports whether the reader can serve the given range; a full
// on-disk entry always can, so the range is returned unchanged.
func (this *FileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// FP returns the raw underlying file handle.
func (this *FileReader) FP() *os.File {
	return this.fp.Raw()
}

// Close releases the reader. When an OpenFileCache is attached, the open
// handle (with its parsed meta/header) is returned to the cache for reuse
// instead of being closed.
func (this *FileReader) Close() error {
	if this.isClosed {
		return nil
	}

	this.isClosed = true

	if this.openFileCache != nil {
		if this.openFile != nil {
			this.openFileCache.Put(this.fp.Name(), this.openFile)
		} else {
			// Copy meta so the cached OpenFile does not alias our buffer.
			var cacheMeta = make([]byte, len(this.meta))
			copy(cacheMeta, this.meta)
			this.openFileCache.Put(this.fp.Name(), NewOpenFile(this.fp.Raw(), cacheMeta, this.header, this.LastModified(), this.bodySize))
		}
		return nil
	}

	return this.fp.Close()
}
// readToBuff fills buf with a single Read call; ok reports whether the read
// returned exactly len(buf) bytes.
func (this *FileReader) readToBuff(fp *fsutils.File, buf []byte) (ok bool, err error) {
	var n int
	n, err = fp.Read(buf)
	if err != nil {
		return false, err
	}
	return n == len(buf), nil
}
// discard closes the handle, evicts it from the open-file cache and removes
// the cache file from disk — used when the entry is found to be corrupted.
func (this *FileReader) discard() error {
	_ = this.fp.Close()
	this.isClosed = true

	// close open file cache
	if this.openFileCache != nil {
		this.openFileCache.Close(this.fp.Name())
	}

	// remove file
	return fsutils.Remove(this.fp.Name())
}

View File

@@ -0,0 +1,18 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !plus
package caches
import (
"errors"
"io"
)
// MMAPFileReader is the non-plus stub: it simply behaves like FileReader.
type MMAPFileReader struct {
	FileReader
}

// CopyBodyTo is unavailable in the non-plus build.
func (this *MMAPFileReader) CopyBodyTo(writer io.Writer) (int, error) {
	// stub
	return 0, errors.New("not implemented")
}

View File

@@ -0,0 +1,22 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus && darwin
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
)
// IsValidForMMAPSize reports whether a file of the given size should be
// memory-mapped on darwin: requires at least 1GB of available memory, caps
// the total mapped bytes at 1/4 of available memory, and only maps files
// between 4KB and maxMMAPFileSize.
func IsValidForMMAPSize(size int64) bool {
	var availableGB = int64(memutils.AvailableMemoryGB())
	if availableGB < 1 {
		return false
	}
	if mmap.TotalMMAPFileSize() > (availableGB << 28) /** 1/4 availableGB **/ {
		return false
	}
	return size > (4<<10) && size < maxMMAPFileSize
}

View File

@@ -0,0 +1,22 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus && !darwin
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
)
// IsValidForMMAPSize reports whether a file of the given size should be
// memory-mapped on non-darwin platforms: requires at least 1GB of available
// memory, caps the total mapped bytes at 1/8 of available memory, and only
// maps files between 256KB and maxMMAPFileSize.
func IsValidForMMAPSize(size int64) bool {
	var availableGB = int64(memutils.AvailableMemoryGB())
	if availableGB < 1 {
		return false
	}
	if mmap.TotalMMAPFileSize() > (availableGB << 27) /** 1/8 availableGB **/ {
		return false
	}
	return size > (256<<10) && size < maxMMAPFileSize
}

View File

@@ -0,0 +1,421 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus
package caches
import (
"encoding/binary"
"errors"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
"os"
"sync"
)
// maxMMAPFileSize is the largest file size eligible for mmap (8MB default,
// scaled up to 4MB per GB of system memory when that is larger).
var maxMMAPFileSize int64 = 8 << 20

func init() {
	var estimatedSize = int64((memutils.SystemMemoryGB() * 4) << 20)
	if estimatedSize > maxMMAPFileSize {
		maxMMAPFileSize = estimatedSize
	}
}

// MMAPFileReader reads a cache entry through a shared memory mapping instead
// of regular file reads.
type MMAPFileReader struct {
	BaseReader

	rawReader *mmap.SharedMMAP // the shared memory mapping
	path      string           // path of the mapped file
	modifiedAt int64           // mod time captured when the mapping was opened

	meta   []byte // raw metadata block
	header []byte // cached raw header (if populated)

	expiresAt    int64 // entry expiration timestamp (from meta)
	status       int   // cached HTTP status code (from meta)
	headerOffset int64 // offset where the header starts
	headerSize   int   // header length in bytes
	bodySize     int64 // body length in bytes
	bodyOffset   int64 // offset where the body starts

	currentOffset int64 // read cursor into the mapping

	isClosed bool      // guards against double Close
	once     sync.Once // ensures the mapping is released exactly once
}

// NewMMAPFileReaderFromPath maps the file at filename and wraps it.
func NewMMAPFileReaderFromPath(filename string) (*MMAPFileReader, error) {
	reader, err := mmap.OpenSharedMMAP(filename)
	if err != nil {
		return nil, err
	}
	return NewMMAPFileReader(reader)
}

// NewMMAPFileReader wraps an existing shared mapping.
// NOTE(review): the returned error is always nil — kept for signature
// symmetry with NewMMAPFileReaderFromPath.
func NewMMAPFileReader(mmapReader *mmap.SharedMMAP) (*MMAPFileReader, error) {
	return &MMAPFileReader{
		path:       mmapReader.Name(),
		rawReader:  mmapReader,
		modifiedAt: mmapReader.Stat().ModTime().Unix(),
	}, nil
}
// Init parses the entry's metadata, discarding the file on failure.
func (this *MMAPFileReader) Init() error {
	return this.InitAutoDiscard(true)
}

// InitAutoDiscard parses the meta block (expiry, status, header/body sizes
// and offsets) from the mapping. When autoDiscard is true, any parse failure
// releases the mapping and removes the file.
func (this *MMAPFileReader) InitAutoDiscard(autoDiscard bool) error {
	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}

	var buf = this.meta
	if len(buf) == 0 {
		buf = make([]byte, SizeMeta)
		ok, err := this.readNext(buf)
		if err != nil {
			return err
		}
		if !ok {
			// Short read: the file is too small to hold a meta block.
			return ErrNotFound
		}
		this.meta = buf
	}

	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))

	var status = types.Int(string(buf[OffsetStatus : OffsetStatus+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status

	// URL
	var urlLength = binary.BigEndian.Uint32(buf[OffsetURLLength : OffsetURLLength+SizeURLLength])

	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[OffsetHeaderLength : OffsetHeaderLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): returns nil without setting isOk, so with autoDiscard
		// the deferred discard removes the file even though no error is
		// reported — confirm this is intended (mirrors FileReader).
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)

	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)
	var bodySize = int(binary.BigEndian.Uint64(buf[OffsetBodyLength : OffsetBodyLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)

	isOk = true
	return nil
}
// TypeName returns the storage type for this reader.
func (this *MMAPFileReader) TypeName() string {
	return "disk"
}

// ExpiresAt returns the entry's expiration timestamp parsed from meta.
func (this *MMAPFileReader) ExpiresAt() int64 {
	return this.expiresAt
}

// Status returns the cached HTTP status code parsed from meta.
func (this *MMAPFileReader) Status() int {
	return this.status
}

// LastModified returns the mod time captured at open, falling back to a
// fresh stat (0 when even that fails).
func (this *MMAPFileReader) LastModified() int64 {
	if this.modifiedAt > 0 {
		return this.modifiedAt
	}
	stat, err := os.Stat(this.path)
	if err != nil {
		return 0
	}
	return stat.ModTime().Unix()
}

// HeaderSize returns the cached header length in bytes.
func (this *MMAPFileReader) HeaderSize() int64 {
	return int64(this.headerSize)
}

// BodySize returns the cached body length in bytes.
func (this *MMAPFileReader) BodySize() int64 {
	return this.bodySize
}
// ReadHeader streams the cached header through callback, then positions the
// cursor at the start of the body. On I/O failure the file is discarded.
func (this *MMAPFileReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	// Fast path: serve the in-memory header copy when it fits in buf.
	if len(this.header) > 0 && len(buf) >= len(this.header) {
		copy(buf, this.header)
		_, err := callback(len(this.header))
		if err != nil {
			return err
		}

		// Position the cursor at the body for subsequent reads.
		this.moveTo(this.bodyOffset)

		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	this.moveTo(this.headerOffset)

	var headerSize = this.headerSize

	if len(buf) > headerSize {
		// The whole header fits in one buffer: read it directly.
		n, err := this.rawReader.ReadAt(buf[:headerSize], this.headerOffset)
		if err != nil {
			if err != io.EOF {
				return err
			}
		}
		_, err = callback(n)
		if err != nil {
			// Callback errors do not indicate file corruption.
			isOk = true
			return err
		}
	} else {
		// Stream the header in buf-sized chunks.
		for {
			n, err := this.read(buf)
			if n > 0 {
				if n < headerSize {
					goNext, e := callback(n)
					if e != nil {
						isOk = true
						return e
					}
					if !goNext {
						break
					}
					headerSize -= n
				} else {
					// The chunk crosses into the body: report only the
					// remaining header bytes and stop.
					_, e := callback(headerSize)
					if e != nil {
						isOk = true
						return e
					}
					break
				}
			}
			if err != nil {
				if err != io.EOF {
					return err
				}
				break
			}
		}
	}

	isOk = true

	// Position the cursor at the body for subsequent reads.
	this.moveTo(this.bodyOffset)

	return nil
}
// ReadBody streams the whole cached body through callback in buf-sized
// chunks. On I/O failure the file is discarded.
func (this *MMAPFileReader) ReadBody(buf []byte, callback ReaderFunc) error {
	if this.bodySize == 0 {
		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	var offset = this.bodyOffset

	// Position the cursor at the body start before streaming.
	this.moveTo(offset)

	for {
		n, err := this.read(buf)
		if n > 0 {
			goNext, e := callback(n)
			if e != nil {
				// Callback errors do not indicate file corruption.
				isOk = true
				return e
			}
			if !goNext {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	return nil
}
// Read implements io.Reader over the body; an empty body reads as EOF.
// A non-EOF read error causes the cache file to be discarded.
func (this *MMAPFileReader) Read(buf []byte) (n int, err error) {
	if this.bodySize == 0 {
		return 0, io.EOF
	}
	n, err = this.read(buf)
	if err != nil && err != io.EOF {
		// Mid-stream failure: treat the cache entry as corrupted.
		_ = this.discard()
	}
	return n, err
}
// ReadBodyRange streams the body bytes in [start, end] through callback.
// Negative start means a suffix range of |end| bytes (HTTP "-N"); negative
// end means from start to the end of the body (HTTP "N-").
func (this *MMAPFileReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	// Translate the requested range into absolute mapping offsets.
	var offset = start
	if start < 0 {
		// Suffix range: last |end| bytes (end is negative here).
		offset = this.bodyOffset + this.bodySize + end
		end = this.bodyOffset + this.bodySize - 1
	} else if end < 0 {
		// Open-ended range: from start to the end of the body.
		offset = this.bodyOffset + start
		end = this.bodyOffset + this.bodySize - 1
	} else {
		offset = this.bodyOffset + start
		end = this.bodyOffset + end
	}
	if offset < 0 || end < 0 || offset > end {
		// Invalid ranges are a caller error, not file corruption.
		isOk = true
		return ErrInvalidRange
	}
	this.moveTo(offset)

	for {
		n, err := this.read(buf)
		if n > 0 {
			// Remaining bytes (inclusive) in the requested range.
			var n2 = int(end-offset) + 1
			if n2 <= n {
				// This chunk completes the range: deliver only what remains.
				_, e := callback(n2)
				if e != nil {
					isOk = true
					return e
				}
				break
			} else {
				goNext, e := callback(n)
				if e != nil {
					isOk = true
					return e
				}
				if !goNext {
					break
				}
			}

			offset += int64(n)
			if offset > end {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	return nil
}
// ContainsRange reports whether the reader can serve the given range; a full
// mapped entry always can, so the range is returned unchanged.
func (this *MMAPFileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// CopyBodyTo writes the whole body (everything after bodyOffset) to writer.
func (this *MMAPFileReader) CopyBodyTo(writer io.Writer) (int, error) {
	return this.rawReader.Write(writer, int(this.bodyOffset))
}

// Close releases the mapping; the sync.Once guards against a double release
// when discard has already run.
func (this *MMAPFileReader) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true

	var err error
	this.once.Do(func() {
		err = this.rawReader.Close()
	})
	return err
}
// moveTo repositions the read cursor (the mapping itself is random-access,
// so this is just bookkeeping).
func (this *MMAPFileReader) moveTo(offset int64) {
	this.currentOffset = offset
}

// readNext fills buf from the cursor position and advances the cursor;
// ok reports whether exactly len(buf) bytes were read.
func (this *MMAPFileReader) readNext(buf []byte) (ok bool, err error) {
	n, err := this.rawReader.ReadAt(buf, this.currentOffset)
	if n > 0 {
		this.currentOffset += int64(n)
	}
	if err != nil {
		return false, err
	}
	ok = n == len(buf)
	return
}

// read reads into p from the cursor position and advances the cursor.
func (this *MMAPFileReader) read(p []byte) (n int, err error) {
	n, err = this.rawReader.ReadAt(p, this.currentOffset)
	if n > 0 {
		this.currentOffset += int64(n)
	}
	return
}
// discard releases the mapping (once) and removes the cache file from disk —
// used when the entry is found to be corrupted.
func (this *MMAPFileReader) discard() error {
	this.once.Do(func() {
		_ = this.rawReader.Close()
	})
	this.isClosed = true

	// remove file
	return fsutils.Remove(this.path)
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,203 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/Tea"
"os"
"testing"
)
// TestFileReader reads header and body of a pre-existing cache entry; the
// test is skipped gracefully when the fixture file does not exist.
func TestFileReader(t *testing.T) {
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}

	_, path, _ := storage.keyPath("my-key")
	fp, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			// Fixture missing: nothing to verify on this machine.
			t.Log("file '" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	defer func() {
		_ = fp.Close()
	}()
	reader := NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
	err = reader.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(reader.Status())
	buf := make([]byte, 10)
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestFileReader_ReadHeader reads and prints the full cache header from a
// fixed local cache file; the test is skipped when the file is absent.
func TestFileReader_ReadHeader(t *testing.T) {
	var path = "/Users/WorkSpace/EdgeProject/EdgeCache/p43/12/6b/126bbed90fc80f2bdfb19558948b0d49.cache"
	fp, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("'" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	defer func() {
		_ = fp.Close()
	}()
	var reader = NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
	err = reader.Init()
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("file '" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	var buf = make([]byte, 16*1024)
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		// BUGFIX: this callback previously ended with a naked `return`,
		// which returned goNext=false and stopped reading after the first
		// chunk; continue reading like the sibling tests do.
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestFileReader_Range exercises ReadBodyRange on the cached "my-number"
// entry with in-bounds, out-of-bounds, suffix ([-1,-3]) and open-ended
// ([3,-1]) ranges; skipped when the cache file does not exist.
func TestFileReader_Range(t *testing.T) {
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	/**writer, err := storage.Open("my-number", time.Now().Unix()+30*86400, 200, 6, 10)
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.Write([]byte("Header"))
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.Write([]byte("0123456789"))
	if err != nil {
		t.Fatal(err)
	}
	_ = writer.Close()**/
	_, path, _ := storage.keyPath("my-number")
	fp, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("'" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	defer func() {
		_ = fp.Close()
	}()
	reader := NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
	err = reader.Init()
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 6)
	{
		err = reader.ReadBodyRange(buf, 0, 0, func(n int) (goNext bool, err error) {
			t.Log("[0, 0]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 7, 7, func(n int) (goNext bool, err error) {
			t.Log("[7, 7]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 0, 10, func(n int) (goNext bool, err error) {
			t.Log("[0, 10]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 3, 5, func(n int) (goNext bool, err error) {
			t.Log("[3, 5]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		// negative start selects a suffix range (last |end| bytes)
		err = reader.ReadBodyRange(buf, -1, -3, func(n int) (goNext bool, err error) {
			t.Log("[, -3]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		// negative end selects everything from start to the end of body
		err = reader.ReadBodyRange(buf, 3, -1, func(n int) (goNext bool, err error) {
			t.Log("[3, ]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,210 @@
package caches
import (
"errors"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"io"
)
// MemoryReader reads a cached entry (header + body) straight from an
// in-memory MemoryItem.
type MemoryReader struct {
	BaseReader
	item   *MemoryItem
	offset int // sequential cursor used by Read
}

// NewMemoryReader creates a reader over the given memory item.
func NewMemoryReader(item *MemoryItem) *MemoryReader {
	return &MemoryReader{item: item}
}

// Init implements Reader; nothing needs preparing for memory items.
func (this *MemoryReader) Init() error {
	return nil
}

// TypeName returns the storage type of this reader.
func (this *MemoryReader) TypeName() string {
	return "memory"
}

// ExpiresAt returns the item's expiration timestamp (unix seconds).
func (this *MemoryReader) ExpiresAt() int64 {
	return this.item.ExpiresAt
}

// Status returns the cached HTTP status code.
func (this *MemoryReader) Status() int {
	return this.item.Status
}

// LastModified returns the item's modification timestamp.
func (this *MemoryReader) LastModified() int64 {
	return this.item.ModifiedAt
}

// HeaderSize returns the size of the cached header in bytes.
func (this *MemoryReader) HeaderSize() int64 {
	return int64(len(this.item.HeaderValue))
}

// BodySize returns the size of the cached body in bytes.
func (this *MemoryReader) BodySize() int64 {
	return int64(len(this.item.BodyValue))
}
// readChunks feeds data to callback in buf-sized chunks, stopping early
// when callback returns goNext=false or an error. This is the single
// implementation previously duplicated verbatim in ReadHeader, ReadBody
// and ReadBodyRange.
//
// Note: like the original loops, an empty data slice still triggers one
// callback(0) invocation.
func (this *MemoryReader) readChunks(data []byte, buf []byte, callback ReaderFunc) error {
	l := len(buf)
	if l == 0 {
		return errors.New("using empty buffer")
	}
	size := len(data)
	offset := 0
	for {
		left := size - offset
		if l <= left {
			// a full buffer's worth remains
			copy(buf, data[offset:offset+l])
			goNext, e := callback(l)
			if e != nil {
				return e
			}
			if !goNext {
				break
			}
		} else {
			// final (possibly empty) partial chunk; goNext is irrelevant
			copy(buf, data[offset:])
			_, e := callback(left)
			if e != nil {
				return e
			}
			break
		}
		offset += l
		if offset >= size {
			break
		}
	}
	return nil
}

// ReadHeader streams the cached header to callback in buf-sized chunks.
func (this *MemoryReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	return this.readChunks(this.item.HeaderValue, buf, callback)
}

// ReadBody streams the cached body to callback in buf-sized chunks.
func (this *MemoryReader) ReadBody(buf []byte, callback ReaderFunc) error {
	return this.readChunks(this.item.BodyValue, buf, callback)
}

// Read implements sequential io.Reader-style reads over the body, using
// this.offset as the cursor; io.EOF is returned once the body is drained.
func (this *MemoryReader) Read(buf []byte) (n int, err error) {
	bufLen := len(buf)
	if bufLen == 0 {
		return 0, errors.New("using empty buffer")
	}
	bodySize := len(this.item.BodyValue)
	left := bodySize - this.offset
	if bufLen <= left {
		copy(buf, this.item.BodyValue[this.offset:this.offset+bufLen])
		n = bufLen
		this.offset += bufLen
		if this.offset >= bodySize {
			err = io.EOF
			return
		}
		return
	} else {
		copy(buf, this.item.BodyValue[this.offset:])
		n = left
		err = io.EOF
		return
	}
}

// ReadBodyRange streams the body bytes in [start, end] (inclusive) to
// callback in buf-sized chunks.
//
// Range conventions (mirroring HTTP ranges):
//   - start < 0: suffix range — the last |end| bytes of the body;
//   - end < 0 (with start >= 0): from start to the end of the body;
//   - end beyond the body is clamped to the last byte.
//
// ErrInvalidRange is returned when the normalized range is empty or out
// of bounds.
func (this *MemoryReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	offset := start
	bodySize := int64(len(this.item.BodyValue))
	if start < 0 {
		offset = bodySize + end
		end = bodySize - 1
	} else if end < 0 {
		end = bodySize - 1
	}
	if end >= bodySize {
		end = bodySize - 1
	}
	if offset < 0 || end < 0 || offset > end {
		return ErrInvalidRange
	}
	return this.readChunks(this.item.BodyValue[offset:end+1], buf, callback)
}
// ContainsRange reports whether the requested range can be served;
// memory items are always complete, so every range is available as-is.
func (this *MemoryReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// Close implements Reader; memory items hold no resources to release.
func (this *MemoryReader) Close() error {
	return nil
}

View File

@@ -0,0 +1,105 @@
package caches
import "testing"
// TestMemoryReader_Header streams a 10-byte header through a 6-byte
// buffer and logs each chunk.
func TestMemoryReader_Header(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: []byte("0123456789"),
		BodyValue:   nil,
		Status:      2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	err := reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("buf:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestMemoryReader_Body streams a 10-byte body through a 6-byte buffer
// and logs each chunk.
func TestMemoryReader_Body(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: nil,
		BodyValue:   []byte("0123456789"),
		Status:      2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	err := reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("buf:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestMemoryReader_Body_Range exercises ReadBodyRange with in-bounds,
// clamped, suffix ([-1,-3]) and open-ended ([3,-1]) ranges.
func TestMemoryReader_Body_Range(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: nil,
		BodyValue:   []byte("0123456789"),
		Status:      2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	var err error
	{
		err = reader.ReadBodyRange(buf, 0, 0, func(n int) (goNext bool, err error) {
			t.Log("[0, 0]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 7, 7, func(n int) (goNext bool, err error) {
			t.Log("[7, 7]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 0, 10, func(n int) (goNext bool, err error) {
			t.Log("[0, 10]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 3, 5, func(n int) (goNext bool, err error) {
			t.Log("[3, 5]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, -1, -3, func(n int) (goNext bool, err error) {
			t.Log("[, -3]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 3, -1, func(n int) (goNext bool, err error) {
			t.Log("[3, ]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,152 @@
package caches
import (
"encoding/binary"
"errors"
"fmt"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
)
// PartialFileReader reads a partially-downloaded cache file together with
// its companion ranges file, which records the byte ranges present.
type PartialFileReader struct {
	*FileReader
	ranges    *PartialRanges
	rangePath string // path of the companion ranges file
}

// NewPartialFileReader creates a reader for the given partial cache file.
func NewPartialFileReader(fp *fsutils.File) *PartialFileReader {
	return &PartialFileReader{
		FileReader: NewFileReader(fp),
		rangePath:  PartialRangesFilePath(fp.Name()),
	}
}

// Init initializes the reader, discarding the cache files on failure.
func (this *PartialFileReader) Init() error {
	return this.InitAutoDiscard(true)
}
// InitAutoDiscard loads the companion ranges file and parses the cache
// file's fixed-size metadata block (expiration, status, URL length,
// header size, body size). When autoDiscard is true, any failure path
// removes both the cache file and the ranges file via the deferred
// discard.
func (this *PartialFileReader) InitAutoDiscard(autoDiscard bool) error {
	// reuse meta/header already cached on the shared open-file entry
	if this.openFile != nil {
		this.meta = this.openFile.meta
		this.header = this.openFile.header
	}
	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}
	// read the ranges file
	ranges, err := NewPartialRangesFromFile(this.rangePath)
	if err != nil {
		return fmt.Errorf("read ranges failed: %w", err)
	}
	this.ranges = ranges
	var buf = this.meta
	if len(buf) == 0 {
		// meta not cached: read the fixed-size block from the file head
		buf = make([]byte, SizeMeta)
		ok, readErr := this.readToBuff(this.fp, buf)
		if readErr != nil {
			return readErr
		}
		if !ok {
			return ErrNotFound
		}
		this.meta = buf
	}
	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))
	// status is stored as ASCII digits; valid HTTP codes only
	status := types.Int(string(buf[SizeExpiresAt : SizeExpiresAt+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status
	// URL
	var urlLength = binary.BigEndian.Uint32(buf[SizeExpiresAt+SizeStatus : SizeExpiresAt+SizeStatus+SizeURLLength])
	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[SizeExpiresAt+SizeStatus+SizeURLLength : SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): isOk is still false here, so when autoDiscard is
		// true the deferred discard removes the files even though nil is
		// returned — confirm this is intentional for header-less entries
		// (the bodySize==0 branch below sets isOk=true first).
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)
	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)
	bodySize := int(binary.BigEndian.Uint64(buf[SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength : SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)
	// read header: pre-load small headers when an open-file cache is active
	if this.openFileCache != nil && len(this.header) == 0 {
		if headerSize > 0 && headerSize <= 512 {
			this.header = make([]byte, headerSize)
			_, err = this.fp.Seek(this.headerOffset, io.SeekStart)
			if err != nil {
				return err
			}
			_, err = this.readToBuff(this.fp, this.header)
			if err != nil {
				return err
			}
		}
	}
	isOk = true
	return nil
}
// ContainsRange reports whether the requested range (already normalized)
// overlaps data actually present on disk; r2 is the nearest available
// range.
func (this *PartialFileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	r2, ok = this.ranges.Nearest(r.Start(), r.End())
	if ok && this.bodySize > 0 {
		// TODO: consider making this configurable
		const minSpan = 128 << 10
		// reject overly small partial answers here to avoid triggering a
		// flood of follow-up requests for tiny pieces
		if r2.Length() < r.Length() && r2.Length() < minSpan {
			ok = false
		}
	}
	return
}

// MaxLength returns the maximum usable content length: the full body
// size when known, otherwise one past the highest downloaded offset.
func (this *PartialFileReader) MaxLength() int64 {
	if this.bodySize > 0 {
		return this.bodySize
	}
	return this.ranges.Max() + 1
}

// Ranges returns the set of byte ranges present in the partial file.
func (this *PartialFileReader) Ranges() *PartialRanges {
	return this.ranges
}

// IsCompleted reports whether every range has been fully downloaded.
func (this *PartialFileReader) IsCompleted() bool {
	return this.ranges != nil && this.ranges.IsCompleted()
}

// discard removes the ranges file (and its queue entry) before
// discarding the underlying cache file.
func (this *PartialFileReader) discard() error {
	SharedPartialRangesQueue.Delete(this.rangePath)
	_ = fsutils.Remove(this.rangePath)
	return this.FileReader.discard()
}

View File

@@ -0,0 +1,7 @@
package caches
// Stat holds aggregate statistics for a cache storage.
type Stat struct {
	Count     int   // number of cached items
	ValueSize int64 // space occupied by the values themselves
	Size      int64 // total space occupied on the medium
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,9 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !plus
package caches
// tryMMAPReader is the non-plus build stub: mmap-backed reading is a
// plus-only feature, so it always returns (nil, nil) and the caller
// falls back to the regular file reader.
func (this *FileStorage) tryMMAPReader(isPartial bool, estimatedSize int64, path string) (Reader, error) {
	// stub
	return nil, nil
}

View File

@@ -0,0 +1,58 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
"os"
)
// tryMMAPReader would open the cache file through a shared mmap when the
// policy enables it and the size is suitable.
func (this *FileStorage) tryMMAPReader(isPartial bool, estimatedSize int64, path string) (Reader, error) {
	// TODO: mmap consumed too much CPU in practice; support is suspended
	// (via the early return below) until that is resolved.
	return nil, nil
	// NOTE(review): everything below is intentionally unreachable while
	// mmap support is paused; kept for when support is re-enabled.
	var options = this.options // copies the pointer, not the struct
	if estimatedSize > 0 &&
		!isPartial &&
		(options != nil && options.EnableMMAP) &&
		IsValidForMMAPSize(estimatedSize) {
		var isOk bool
		// any failure below removes the cache file
		defer func() {
			if !isOk {
				_ = this.removeCacheFile(path)
			}
		}()
		sharedMMAP, err := mmap.OpenSharedMMAP(path)
		if err != nil {
			if os.IsNotExist(err) {
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		reader, err := NewMMAPFileReader(sharedMMAP)
		if err != nil {
			if os.IsNotExist(err) {
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		err = reader.Init()
		if err != nil {
			if os.IsNotExist(err) {
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		isOk = true
		return reader, nil
	}
	return nil, nil
}

View File

@@ -0,0 +1,707 @@
package caches
import (
"bytes"
"errors"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/utils"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
"github.com/iwind/TeaGo/logs"
"io"
"net/http"
"runtime"
"strconv"
"sync"
"testing"
"time"
)
// TestFileStorage_Init initializes a file storage, runs one purge loop
// and prints list statistics. Runs only in single-test mode.
func TestFileStorage_Init(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	//t.Log(storage.list.m)
	/**err = storage.Write("c", bytes.NewReader([]byte("i am c")), 4, "second")
	if err != nil {
		t.Fatal(err)
	}**/
	//logs.PrintAsJSON(storage.list.m, t)
	time.Sleep(2 * time.Second)
	storage.purgeLoop()
	t.Log(storage.list.(*SQLiteFileList).Stat(func(hash string) bool {
		return true
	}))
}

// TestFileStorage_OpenWriter writes a header and body under "my-key" and
// reports the resulting sizes.
func TestFileStorage_OpenWriter(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	header := []byte("Header")
	body := []byte("This is Body")
	writer, err := storage.OpenWriter("my-key", time.Now().Unix()+86400, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
	_, err = writer.WriteHeader(header)
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.Write(body)
	if err != nil {
		t.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("header:", writer.HeaderSize(), "body:", writer.BodySize())
	t.Log("ok")
}

// TestFileStorage_OpenWriter_Partial writes ranged (partial) content via
// WriteAt at offset 0.
func TestFileStorage_OpenWriter_Partial(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   2,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	writer, err := storage.OpenWriter("my-key", time.Now().Unix()+86400, 200, -1, -1, -1, true)
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.WriteHeader([]byte("Content-Type:text/html; charset=utf-8"))
	if err != nil {
		t.Fatal(err)
	}
	err = writer.WriteAt(0, []byte("Hello, World"))
	if err != nil {
		t.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
}
// TestFileStorage_OpenWriter_HTTP caches a synthetic http.Response:
// headers are serialized as "Key:Value\n" lines, the body is streamed in
// 1 KiB chunks.
func TestFileStorage_OpenWriter_HTTP(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	writer, err := storage.OpenWriter("my-http-response", time.Now().Unix()+86400, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Header: http.Header{
			"Content-Type":  []string{"text/html; charset=utf-8"},
			"Last-Modified": []string{"Wed, 06 Jan 2021 10:03:29 GMT"},
			"Server":        []string{"CDN-Server"},
		},
		Body: io.NopCloser(bytes.NewBuffer([]byte("THIS IS HTTP BODY"))),
	}
	for k, v := range resp.Header {
		for _, v1 := range v {
			_, err = writer.WriteHeader([]byte(k + ":" + v1 + "\n"))
			if err != nil {
				t.Fatal(err)
			}
		}
	}
	buf := make([]byte, 1024)
	for {
		// note: the write error below shadows the read error; the loop
		// still terminates via the next Read returning io.EOF
		n, err := resp.Body.Read(buf)
		if n > 0 {
			_, err = writer.Write(buf[:n])
			if err != nil {
				t.Fatal(err)
			}
		}
		if err != nil {
			break
		}
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("header:", writer.HeaderSize(), "body:", writer.BodySize())
	t.Log("ok")
}
// TestFileStorage_Concurrent_Open_DifferentFile opens 100 writers for
// 100 distinct keys concurrently; distinct keys must never report
// ErrFileIsWriting.
func TestFileStorage_Concurrent_Open_DifferentFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	wg := sync.WaitGroup{}
	count := 100
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(i int) {
			defer wg.Done()
			writer, err := storage.OpenWriter("abc"+strconv.Itoa(i), time.Now().Unix()+3600, 200, -1, -1, -1, false)
			if err != nil {
				if errors.Is(err, ErrFileIsWriting) {
					t.Error(err)
					return
				}
				return
			}
			//t.Log(writer)
			_, err = writer.Write([]byte("Hello,World"))
			if err != nil {
				t.Error(err)
				return
			}
			// deliberately slow down the write
			time.Sleep(1 * time.Second)
			err = writer.Close()
			if err != nil {
				t.Error(err)
				return
			}
		}(i)
	}
	wg.Wait()
}

// TestFileStorage_Concurrent_Open_SameFile opens 100 writers for the
// SAME key concurrently; overlapping writers are expected and tolerated.
func TestFileStorage_Concurrent_Open_SameFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	wg := sync.WaitGroup{}
	count := 100
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(i int) {
			defer wg.Done()
			writer, err := storage.OpenWriter("abc"+strconv.Itoa(0), time.Now().Unix()+3600, 200, -1, -1, -1, false)
			if err != nil {
				if errors.Is(err, ErrFileIsWriting) {
					t.Error(err)
					return
				}
				return
			}
			//t.Log(writer)
			t.Log("writing")
			_, err = writer.Write([]byte("Hello,World"))
			if err != nil {
				t.Error(err)
				return
			}
			// deliberately slow down the write
			time.Sleep(time.Duration(1) * time.Second)
			err = writer.Close()
			if err != nil {
				t.Error(err)
				return
			}
		}(i)
	}
	wg.Wait()
}
// TestFileStorage_Read reads back the "my-key" entry written by
// TestFileStorage_OpenWriter and logs header and body chunks.
func TestFileStorage_Read(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	reader, err := storage.OpenReader("my-key", false, false)
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 6)
	t.Log(reader.Status())
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}

// TestFileStorage_Read_HTTP_Response reads back the entry written by
// TestFileStorage_OpenWriter_HTTP and re-parses the "Key:Value\n" header
// lines from the chunked stream.
func TestFileStorage_Read_HTTP_Response(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	reader, err := storage.OpenReader("my-http-response", false, false)
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 32)
	t.Log(reader.Status())
	headerBuf := []byte{}
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		headerBuf = append(headerBuf, buf...)
		for {
			nIndex := bytes.Index(headerBuf, []byte{'\n'})
			if nIndex >= 0 {
				row := headerBuf[:nIndex]
				spaceIndex := bytes.Index(row, []byte{':'})
				if spaceIndex <= 0 {
					return false, errors.New("invalid header")
				}
				t.Log("header row:", string(row[:spaceIndex]), string(row[spaceIndex+1:]))
				headerBuf = headerBuf[nIndex+1:]
			} else {
				break
			}
		}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}
// TestFileStorage_Read_NotFound verifies that reading a missing key
// reports ErrNotFound instead of failing the test.
func TestFileStorage_Read_NotFound(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	buf := make([]byte, 6)
	reader, err := storage.OpenReader("my-key-10000", false, false)
	if err != nil {
		// BUGFIX: compare the sentinel with errors.Is (was `==`) so a
		// wrapped ErrNotFound is recognized too; also fix the "not fund"
		// typo in the log message.
		if errors.Is(err, ErrNotFound) {
			t.Log("cache not found")
			return
		}
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}
// TestFileStorage_Delete removes the "my-key" entry.
func TestFileStorage_Delete(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = storage.Delete("my-key")
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")
}

// TestFileStorage_Stat prints storage statistics and how long collecting
// them took.
func TestFileStorage_Stat(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	before := time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()
	stat, err := storage.Stat()
	if err != nil {
		t.Fatal(err)
	}
	logs.PrintAsJSON(stat, t)
}

// TestFileStorage_CleanAll wipes every cached item and logs the item
// count before and after.
func TestFileStorage_CleanAll(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	before := time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()
	c, _ := storage.list.Count()
	t.Log("before:", c)
	err = storage.CleanAll()
	if err != nil {
		t.Fatal(err)
	}
	c, _ = storage.list.Count()
	t.Log("after:", c)
	t.Log("ok")
}
// TestFileStorage_Stop checks that Stop can be called (twice, via defer)
// without panicking.
func TestFileStorage_Stop(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	storage.Stop()
}

// TestFileStorage_DecodeFile prints the on-disk path derived from a key.
func TestFileStorage_DecodeFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	_, path, _ := storage.keyPath("my-key")
	t.Log(path)
}

// TestFileStorage_RemoveCacheFile removes one fixed local cache file.
func TestFileStorage_RemoveCacheFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(nil)
	defer storage.Stop()
	t.Log(storage.removeCacheFile("/Users/WorkSpace/EdgeProject/EdgeCache/p43/15/7e/157eba0dfc6dfb6fbbf20b1f9e584674.cache"))
}

// TestFileStorage_ScanGarbageCaches walks garbage cache files and logs
// each path with its companion ranges-file path.
func TestFileStorage_ScanGarbageCaches(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:      43,
		Options: map[string]any{"dir": "/Users/WorkSpace/EdgeProject/EdgeCache"},
	})
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = storage.ScanGarbageCaches(func(path string) error {
		t.Log(path, PartialRangesFilePath(path))
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// BenchmarkFileStorage_Read measures opening a reader and streaming the
// body for an existing key on a single OS thread.
func BenchmarkFileStorage_Read(b *testing.B) {
	runtime.GOMAXPROCS(1)
	// raise the fd limit so repeated opens do not exhaust descriptors
	_ = utils.SetRLimit(1 << 20)
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		reader, err := storage.OpenReader("my-key", false, false)
		if err != nil {
			b.Fatal(err)
		}
		buf := make([]byte, 1024)
		_ = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
			return true, nil
		})
		_ = reader.Close()
	}
}

// BenchmarkFileStorage_KeyPath measures key-to-path hashing alone.
func BenchmarkFileStorage_KeyPath(b *testing.B) {
	runtime.GOMAXPROCS(1)
	var storage = &FileStorage{
		options: &serverconfigs.HTTPFileCacheStorage{},
		policy:  &serverconfigs.HTTPCachePolicy{Id: 1},
	}
	for i := 0; i < b.N; i++ {
		_, _, _ = storage.keyPath(strconv.Itoa(i))
	}
}

View File

@@ -0,0 +1,61 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
)
// StorageInterface is the contract every cache storage backend implements.
type StorageInterface interface {
	// Init initializes the storage.
	Init() error

	// OpenReader opens a reader for the cache stored under key.
	OpenReader(key string, useStale bool, isPartial bool) (reader Reader, err error)

	// OpenWriter opens a writer waiting for content to be written.
	// size and maxSize may be -1 (unknown).
	OpenWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64, maxSize int64, isPartial bool) (Writer, error)

	// OpenFlushWriter opens a writer used to flush content in directly from another medium.
	OpenFlushWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64) (Writer, error)

	// Delete removes the cache stored under key.
	Delete(key string) error

	// Stat collects cache statistics.
	Stat() (*Stat, error)

	// TotalDiskSize returns the disk space consumed.
	TotalDiskSize() int64

	// TotalMemorySize returns the memory consumed.
	TotalMemorySize() int64

	// CleanAll removes all cached content.
	CleanAll() error

	// Purge removes a batch of caches.
	// urlType is "file" or "dir".
	Purge(keys []string, urlType string) error

	// Stop stops the cache policy.
	Stop()

	// Policy returns the policy backing this storage.
	Policy() *serverconfigs.HTTPCachePolicy

	// UpdatePolicy applies a new policy in place.
	UpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy)

	// CanUpdatePolicy checks whether the policy can be updated in place.
	CanUpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy) bool

	// AddToList adds a cached item to the list.
	AddToList(item *Item)

	// IgnoreKey marks a key as not cacheable (for sizes up to maxSize).
	IgnoreKey(key string, maxSize int64)

	// CanSendfile reports whether sendfile is supported.
	CanSendfile() bool
}

View File

@@ -0,0 +1,730 @@
package caches
import (
"fmt"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
setutils "github.com/TeaOSLab/EdgeNode/internal/utils/sets"
"github.com/TeaOSLab/EdgeNode/internal/utils/trackers"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/cespare/xxhash/v2"
"github.com/iwind/TeaGo/types"
"math"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// MemoryItem is a single cached entry held entirely in memory.
type MemoryItem struct {
	ExpiresAt   int64  // expiration timestamp (unix seconds)
	HeaderValue []byte // serialized response header
	BodyValue   []byte // response body
	Status      int    // cached HTTP status code
	IsDone      bool   // true once the writer has finished this item
	ModifiedAt  int64  // last-modified timestamp
	IsPrepared  bool
	WriteOffset int64

	isReferring bool // if it is referring by other objects
}

// IsExpired reports whether the item has passed its expiration time.
func (this *MemoryItem) IsExpired() bool {
	return this.ExpiresAt < fasttime.Now().Unix()
}
const (
	// MemoryShardCount is the number of cache shards; sharding lowers
	// lock granularity so concurrent readers/writers rarely contend.
	MemoryShardCount = 256
)

// memoryShard is one lock-protected slice of the in-memory cache.
type memoryShard struct {
	locker    sync.RWMutex
	valuesMap map[uint64]*MemoryItem // hash => item
}
// MemoryStorage is an in-memory cache storage, optionally backed by a
// parent (disk) storage that dirty items are flushed to.
type MemoryStorage struct {
	parentStorage StorageInterface
	policy        *serverconfigs.HTTPCachePolicy
	list          ListInterface

	// sharded locks instead of a single global lock
	shards [MemoryShardCount]*memoryShard

	dirtyChan      chan string // hash chan
	dirtyQueueSize int
	purgeTicker    *utils.Ticker

	usedSize       int64
	totalDirtySize int64

	writingKeyMap map[string]zero.Zero // key => bool
	writingLocker sync.Mutex           // guards writingKeyMap

	ignoreKeys *setutils.FixedSet
}
// NewMemoryStorage creates a memory cache storage. When parentStorage is
// non-nil, dirty items are queued on dirtyChan for flushing to it; the
// queue size defaults to 100k entries per GB of system memory.
func NewMemoryStorage(policy *serverconfigs.HTTPCachePolicy, parentStorage StorageInterface) *MemoryStorage {
	var dirtyChan chan string
	var queueSize = policy.MemoryAutoFlushQueueSize
	if parentStorage != nil {
		if queueSize <= 0 {
			queueSize = memutils.SystemMemoryGB() * 100_000
		}
		dirtyChan = make(chan string, queueSize)
	}

	storage := &MemoryStorage{
		parentStorage:  parentStorage,
		policy:         policy,
		list:           NewMemoryList(),
		dirtyChan:      dirtyChan,
		dirtyQueueSize: queueSize,
		writingKeyMap:  map[string]zero.Zero{},
		ignoreKeys:     setutils.NewFixedSet(32768),
	}
	// initialize every shard up front so getShard never needs a nil check
	for i := 0; i < MemoryShardCount; i++ {
		storage.shards[i] = &memoryShard{
			valuesMap: map[uint64]*MemoryItem{},
		}
	}
	return storage
}
// getShard returns the shard responsible for the given hash.
func (this *MemoryStorage) getShard(hash uint64) *memoryShard {
	return this.shards[hash%MemoryShardCount]
}
// Init wires list size accounting into usedSize, starts the purge ticker
// and, when a parent storage exists, background flush workers whose
// count scales with disk speed and CPU count.
func (this *MemoryStorage) Init() error {
	_ = this.list.Init()
	this.list.OnAdd(func(item *Item) {
		atomic.AddInt64(&this.usedSize, item.TotalSize())
	})
	this.list.OnRemove(func(item *Item) {
		atomic.AddInt64(&this.usedSize, -item.TotalSize())
	})
	this.initPurgeTicker()

	// start periodic flush-memory-to-disk workers
	if this.parentStorage != nil {
		var threads = 2
		var numCPU = runtime.NumCPU()
		if fsutils.DiskIsExtremelyFast() {
			if numCPU >= 8 {
				threads = 8
			} else {
				threads = 4
			}
		} else if fsutils.DiskIsFast() {
			if numCPU >= 4 {
				threads = 4
			}
		}
		for i := 0; i < threads; i++ {
			goman.New(func() {
				this.startFlush()
			})
		}
	}
	return nil
}
// OpenReader opens a reader for the cached entry under key. It returns
// ErrNotFound when the item is missing, unfinished, or expired (unless
// useStale allows serving stale content); expired entries are deleted.
func (this *MemoryStorage) OpenReader(key string, useStale bool, isPartial bool) (Reader, error) {
	var hash = this.hash(key)

	// cheap pre-check against the item list before touching the shard
	exists, _, _ := this.list.Exist(types.String(hash))
	if !exists {
		return nil, ErrNotFound
	}

	var shard = this.getShard(hash)

	// BUGFIX: item.isReferring is WRITTEN below, so a full write lock is
	// required; the previous RLock allowed a data race on that field
	// between concurrent readers.
	shard.locker.Lock()
	var item = shard.valuesMap[hash]
	if item != nil {
		item.isReferring = true
	}
	if item == nil || !item.IsDone {
		shard.locker.Unlock()
		return nil, ErrNotFound
	}
	if useStale || (item.ExpiresAt > fasttime.Now().Unix()) {
		var reader = NewMemoryReader(item)
		err := reader.Init()
		shard.locker.Unlock()
		if err != nil {
			return nil, err
		}
		return reader, nil
	}
	shard.locker.Unlock()

	// expired and stale not allowed: drop the entry
	_ = this.Delete(key)
	return nil, ErrNotFound
}
// OpenWriter opens a writer for key, rejecting keys previously marked as
// too large, and rejecting partial (ranged) content, which memory cache
// does not support.
func (this *MemoryStorage) OpenWriter(key string, expiredAt int64, status int, headerSize int, bodySize int64, maxSize int64, isPartial bool) (Writer, error) {
	if maxSize > 0 && this.ignoreKeys.Has(types.String(maxSize)+"$"+key) {
		return nil, ErrEntityTooLarge
	}

	// TODO: memory cache does not support chunked/partial content storage yet
	if isPartial {
		return nil, fmt.Errorf("%w (004)", ErrFileIsWriting)
	}
	return this.openWriter(key, expiredAt, status, headerSize, bodySize, maxSize, true)
}

// OpenFlushWriter opens a writer used to flush content in directly from
// another medium; the written item is always marked dirty.
func (this *MemoryStorage) OpenFlushWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64) (Writer, error) {
	return this.openWriter(key, expiresAt, status, headerSize, bodySize, -1, true)
}
// openWriter is the shared implementation behind OpenWriter and
// OpenFlushWriter: it guards against concurrent writes to the same key,
// enforces the memory capacity limit, removes any previous value, then
// hands out a MemoryWriter.
func (this *MemoryStorage) openWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64, maxSize int64, isDirty bool) (Writer, error) {
	// reject early when the flush-to-disk queue is (nearly) full
	if isDirty &&
		this.parentStorage != nil &&
		this.dirtyQueueSize > 0 &&
		len(this.dirtyChan) >= this.dirtyQueueSize-64 /** delta **/ {
		return nil, ErrWritingQueueFull
	}

	// writingKeyMap is guarded by its own dedicated lock
	this.writingLocker.Lock()
	// is this key already being written?
	var isWriting = false
	_, ok := this.writingKeyMap[key]
	if ok {
		this.writingLocker.Unlock()
		return nil, fmt.Errorf("%w (005)", ErrFileIsWriting)
	}
	this.writingKeyMap[key] = zero.New()
	this.writingLocker.Unlock()
	defer func() {
		// on failure paths release the per-key claim immediately; on
		// success the MemoryWriter end-callback releases it instead
		if !isWriting {
			this.writingLocker.Lock()
			delete(this.writingKeyMap, key)
			this.writingLocker.Unlock()
		}
	}()

	// only lock the shard owning this key's hash
	var hash = this.hash(key)
	shard := this.getShard(hash)
	shard.locker.Lock()
	defer shard.locker.Unlock()

	// is there an existing, unexpired value?
	item, ok := shard.valuesMap[hash]
	if ok && !item.IsExpired() {
		var hashString = types.String(hash)
		exists, _, _ := this.list.Exist(hashString)
		if !exists {
			// remove from values map
			delete(shard.valuesMap, hash)
			_ = this.list.Remove(hashString)
			item = nil
		} else {
			return nil, fmt.Errorf("%w (006)", ErrFileIsWriting)
		}
	}

	// would this write exceed the configured memory capacity?
	var capacityBytes = this.memoryCapacityBytes()
	if bodySize < 0 {
		bodySize = 0
	}
	if capacityBytes > 0 && capacityBytes <= atomic.LoadInt64(&this.usedSize)+bodySize {
		return nil, NewCapacityError("write memory cache failed: over memory size: " + strconv.FormatInt(capacityBytes, 10) + ", current size: " + strconv.FormatInt(this.usedSize, 10) + " bytes")
	}

	// drop any previous value first
	err := this.deleteWithoutLocker(key)
	if err != nil {
		return nil, err
	}
	isWriting = true
	return NewMemoryWriter(this, key, expiresAt, status, isDirty, bodySize, maxSize, func(valueItem *MemoryItem) {
		// release the per-key write claim once the writer finishes
		this.writingLocker.Lock()
		delete(this.writingKeyMap, key)
		this.writingLocker.Unlock()
	}), nil
}
// Delete removes the cached entry for the given key from both its shard map
// and the item list. It always reports success.
func (this *MemoryStorage) Delete(key string) error {
	var hash = this.hash(key)
	var shard = this.getShard(hash)

	shard.locker.Lock()
	defer shard.locker.Unlock()

	delete(shard.valuesMap, hash)
	_ = this.list.Remove(types.String(hash))
	return nil
}
// Stat returns cache statistics. It counts every item: the list keeps its
// own bookkeeping, so no shard needs to be locked here.
func (this *MemoryStorage) Stat() (*Stat, error) {
	var acceptAll = func(hash string) bool {
		return true
	}
	return this.list.Stat(acceptAll)
}
// CleanAll drops every cached item: each shard is emptied under its own
// lock, the item list is reset and the usage counter is zeroed.
func (this *MemoryStorage) CleanAll() error {
	for i := 0; i < MemoryShardCount; i++ {
		this.shards[i].locker.Lock()
		this.shards[i].valuesMap = make(map[uint64]*MemoryItem)
		this.shards[i].locker.Unlock()
	}

	_ = this.list.Reset()
	atomic.StoreInt64(&this.usedSize, 0)
	return nil
}
// Purge removes a batch of cached entries. urlType selects the matching
// mode: "dir" purges by key prefix, anything else purges exact URLs.
// Keys shaped like "http(s)://*.example.com/..." are treated as wildcard
// patterns in either mode.
func (this *MemoryStorage) Purge(keys []string, urlType string) error {
	var isDir = urlType == "dir"

	for _, key := range keys {
		// Detect the wildcard form http(s)://*.example.com
		var isWildcard = false
		if schemeIndex := strings.Index(key, "://"); schemeIndex > 0 {
			isWildcard = strings.HasPrefix(key[schemeIndex+3:], "*.")
		}

		var err error
		switch {
		case isDir && isWildcard:
			err = this.list.CleanMatchPrefix(key)
		case isDir:
			err = this.list.CleanPrefix(key)
		case isWildcard:
			err = this.list.CleanMatchKey(key)
		default:
			err = this.Delete(key)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// Stop shuts the storage down: it clears every shard and the writing
// registry, resets and closes the list, stops the purge ticker, and closes
// the dirty channel so the flush goroutine (startFlush) exits. The storage
// must not be used afterwards.
func (this *MemoryStorage) Stop() {
	// Empty each shard under its own lock.
	for i := 0; i < MemoryShardCount; i++ {
		this.shards[i].locker.Lock()
		this.shards[i].valuesMap = map[uint64]*MemoryItem{}
		this.shards[i].locker.Unlock()
	}

	this.writingLocker.Lock()
	this.writingKeyMap = map[string]zero.Zero{}
	this.writingLocker.Unlock()

	_ = this.list.Reset()
	if this.purgeTicker != nil {
		this.purgeTicker.Stop()
	}

	// Closing the channel terminates startFlush's range loop.
	if this.dirtyChan != nil {
		close(this.dirtyChan)
	}

	this.usedSize = 0
	this.totalDirtySize = 0

	_ = this.list.Close()
	this.ignoreKeys.Reset()

	// Encourage the runtime to return the freed memory to the OS.
	runtime.GC()

	remotelogs.Println("CACHE", "close memory storage '"+strconv.FormatInt(this.policy.Id, 10)+"'")
}
// Policy returns the cache policy currently attached to this storage.
func (this *MemoryStorage) Policy() *serverconfigs.HTTPCachePolicy {
	var currentPolicy = this.policy
	return currentPolicy
}
// UpdatePolicy swaps in a new cache policy. The purge ticker is restarted
// when its interval changed, a zero capacity clears the whole cache, and the
// ignored-key set is reset because size limits may have changed.
func (this *MemoryStorage) UpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy) {
	var oldPolicy = this.policy
	this.policy = newPolicy

	if newPolicy.MemoryAutoPurgeInterval != oldPolicy.MemoryAutoPurgeInterval {
		this.initPurgeTicker()
	}

	// A zero capacity means nothing may be cached: drop everything.
	if newPolicy.CapacityBytes() == 0 {
		_ = this.CleanAll()
	}

	// reset ignored keys
	this.ignoreKeys.Reset()
}
// CanUpdatePolicy reports whether the storage can adopt newPolicy in place:
// only another memory-type policy qualifies.
func (this *MemoryStorage) CanUpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy) bool {
	if newPolicy == nil {
		return false
	}
	return newPolicy.Type == serverconfigs.CachePolicyStorageMemory
}
// AddToList registers a cached item in the item list. Items that were
// already added (non-zero MetaSize) are skipped.
func (this *MemoryStorage) AddToList(item *Item) {
	// A non-zero MetaSize marks an item that was added before.
	if item.MetaSize > 0 {
		return
	}

	// 128 bytes is the estimated fixed overhead of the bookkeeping structures.
	item.MetaSize = 128 + int64(len(item.Key))

	if item.Host == "" {
		item.Host = ParseHost(item.Key)
	}
	_ = this.list.Add(types.String(this.hash(item.Key)), item)
}
// TotalDiskSize returns the disk usage of this storage; the memory storage
// never touches disk, so it is always zero.
func (this *MemoryStorage) TotalDiskSize() int64 {
	const diskUsage = 0
	return diskUsage
}
// TotalMemorySize returns the bytes currently used by cached items.
func (this *MemoryStorage) TotalMemorySize() int64 {
	var used = atomic.LoadInt64(&this.usedSize)
	return used
}
// IgnoreKey marks a key as not cacheable under the given size limit, so
// later OpenWriter calls with the same limit fail fast.
func (this *MemoryStorage) IgnoreKey(key string, maxSize int64) {
	var compositeKey = types.String(maxSize) + "$" + key
	this.ignoreKeys.Push(compositeKey)
}
// CanSendfile reports sendfile support; memory-backed content has no file
// descriptor, so this is always false.
func (this *MemoryStorage) CanSendfile() bool {
	const supportsSendfile = false
	return supportsSendfile
}
// HasFreeSpaceForHotItems reports whether current usage is below 75% of
// capacity, i.e. whether hot content may still be promoted into memory.
func (this *MemoryStorage) HasFreeSpaceForHotItems() bool {
	var threshold = this.memoryCapacityBytes() * 3 / 4
	return atomic.LoadInt64(&this.usedSize) < threshold
}
// hash maps a cache key to its 64-bit xxhash digest.
func (this *MemoryStorage) hash(key string) uint64 {
	var digest = xxhash.Sum64String(key)
	return digest
}
// purgeLoop runs one maintenance pass: it first removes a bounded number of
// expired entries, then — when usage is close to capacity — evicts
// least-frequently-used entries to restore free headroom.
func (this *MemoryStorage) purgeLoop() {
	// Remove expired entries, at most MemoryAutoPurgeCount per pass
	// (default 2000).
	var purgeCount = this.policy.MemoryAutoPurgeCount
	if purgeCount <= 0 {
		purgeCount = 2000
	}
	_, _ = this.list.Purge(purgeCount, func(hash string) error {
		uintHash, err := strconv.ParseUint(hash, 10, 64)
		if err == nil {
			// Lock only the shard that owns this hash.
			shard := this.getShard(uintHash)
			shard.locker.Lock()
			delete(shard.valuesMap, uintHash)
			shard.locker.Unlock()
		}
		return nil
	})

	// LFU
	// Decide whether LFU eviction should run: it kicks in once used capacity
	// reaches (100 - MemoryLFUFreePercent)% of the configured capacity.
	var capacityBytes = this.policy.CapacityBytes()
	var startLFU = false
	var usedPercent = float32(this.TotalMemorySize()*100) / float32(capacityBytes)
	var lfuFreePercent = this.policy.MemoryLFUFreePercent
	if lfuFreePercent <= 0 {
		lfuFreePercent = 5
	}
	if capacityBytes > 0 {
		if lfuFreePercent < 100 {
			if usedPercent >= 100-lfuFreePercent {
				startLFU = true
			}
		}
	}

	if startLFU {
		var total, _ = this.list.Count()
		if total > 0 {
			// Evict about twice the free-percent target ...
			var count = types.Int(math.Ceil(float64(total) * float64(lfuFreePercent*2) / 100))
			if count > 0 {
				// ... but cap the work done in a single pass.
				if count > 2000 {
					count = 2000
				}

				// No log line here on purpose: this branch can fire very often.
				err := this.list.PurgeLFU(count, func(hash string) error {
					uintHash, err := strconv.ParseUint(hash, 10, 64)
					if err == nil {
						// Lock only the shard that owns this hash.
						shard := this.getShard(uintHash)
						shard.locker.Lock()
						delete(shard.valuesMap, uintHash)
						shard.locker.Unlock()
					}
					return nil
				})
				if err != nil {
					remotelogs.Warn("CACHE", "purge memory storage in LFU failed: "+err.Error())
				}
			}
		}
	}
}
// startFlush drains the dirty-key channel and flushes each queued entry to
// the parent storage. It exits when the channel is closed (see Stop).
//
// The previous version kept a local statCount that was incremented and reset
// but never read; that dead counter has been removed.
func (this *MemoryStorage) startFlush() {
	for key := range this.dirtyChan {
		this.flushItem(key)

		// Back off while the file system is under extreme load.
		if fsutils.IsInExtremelyHighLoad {
			time.Sleep(1 * time.Second)
		}
	}
}
// flushItem moves one finished in-memory item to the parent storage.
// fullKey has the form "<bodySize>@<cacheKey>" as queued by
// MemoryWriter.Close. Whatever happens, the item is removed from memory and
// the pending dirty size is decreased by the queued body size.
func (this *MemoryStorage) flushItem(fullKey string) {
	sizeString, key, found := strings.Cut(fullKey, "@")
	if !found {
		return
	}

	defer func() {
		atomic.AddInt64(&this.totalDirtySize, -types.Int64(sizeString))
	}()

	if this.parentStorage == nil {
		return
	}

	var hash = this.hash(key)

	// Look the item up under its shard lock only.
	shard := this.getShard(hash)
	shard.locker.RLock()
	item, ok := shard.valuesMap[hash]
	shard.locker.RUnlock()

	// Remove the item from memory no matter how this function exits.
	defer func() {
		_ = this.Delete(key)
	}()

	if !ok {
		return
	}
	if !item.IsDone {
		remotelogs.Error("CACHE", "flush items failed: open writer failed: item has not been done")
		return
	}
	if item.IsExpired() {
		return
	}

	// The key may be queued before AddToList has run; wait (bounded retries)
	// for it to show up in the list, and discard the item if it never does.
	isInList, _, err := this.list.Exist(types.String(hash))
	if err != nil {
		remotelogs.Error("CACHE", "flush items failed: "+err.Error())
		return
	}
	if !isInList {
		for i := 0; i < 1000; i++ {
			isInList, _, err = this.list.Exist(types.String(hash))
			if err != nil {
				remotelogs.Error("CACHE", "flush items failed: "+err.Error())
				time.Sleep(1 * time.Second)
				continue
			}
			if isInList {
				break
			}
			time.Sleep(1 * time.Millisecond)
		}
		if !isInList {
			// discard
			return
		}
	}

	// Copy header and body into the parent storage.
	writer, err := this.parentStorage.OpenFlushWriter(key, item.ExpiresAt, item.Status, len(item.HeaderValue), int64(len(item.BodyValue)))
	if err != nil {
		if !CanIgnoreErr(err) {
			remotelogs.Error("CACHE", "flush items failed: open writer failed: "+err.Error())
		}
		return
	}

	_, err = writer.WriteHeader(item.HeaderValue)
	if err != nil {
		_ = writer.Discard()
		remotelogs.Error("CACHE", "flush items failed: write header failed: "+err.Error())
		return
	}
	_, err = writer.Write(item.BodyValue)
	if err != nil {
		_ = writer.Discard()
		remotelogs.Error("CACHE", "flush items failed: writer body failed: "+err.Error())
		return
	}

	err = writer.Close()
	if err != nil {
		_ = writer.Discard()
		remotelogs.Error("CACHE", "flush items failed: close writer failed: "+err.Error())
		return
	}

	// Register the flushed entry in the parent's list.
	this.parentStorage.AddToList(&Item{
		Type:       writer.ItemType(),
		Key:        key,
		Host:       ParseHost(key),
		ExpiresAt:  item.ExpiresAt,
		HeaderSize: writer.HeaderSize(),
		BodySize:   writer.BodySize(),
	})
}
// memoryCapacityBytes resolves the effective capacity of this storage:
// the manager-wide MaxMemoryCapacity wins over the policy capacity, and
// either value is clamped to the per-storage system memory limit.
func (this *MemoryStorage) memoryCapacityBytes() int64 {
	var systemLimit = SharedManager.MaxSystemMemoryBytesPerStorage()
	if this.policy == nil {
		return systemLimit
	}

	// clamp to the per-storage system limit
	var capped = func(v int64) int64 {
		if v > systemLimit {
			return systemLimit
		}
		return v
	}

	if SharedManager.MaxMemoryCapacity != nil {
		if v := SharedManager.MaxMemoryCapacity.Bytes(); v > 0 {
			return capped(v)
		}
	}

	if capacity := this.policy.Capacity; capacity != nil { // copy
		if v := capacity.Bytes(); v > 0 {
			return capped(v)
		}
	}
	return systemLimit
}
// deleteWithoutLocker removes the key from its shard map and from the item
// list without taking the shard lock. Callers (e.g. openWriter) must already
// hold the lock of the shard that owns this key.
func (this *MemoryStorage) deleteWithoutLocker(key string) error {
	var hash = this.hash(key)
	var shard = this.getShard(hash)
	delete(shard.valuesMap, hash)
	_ = this.list.Remove(types.String(hash))
	return nil
}
// initPurgeTicker (re)starts the periodic purge task using the policy's
// MemoryAutoPurgeInterval (seconds, default 5). Each tick runs purgeLoop
// inside a tracker span.
func (this *MemoryStorage) initPurgeTicker() {
	var autoPurgeInterval = this.policy.MemoryAutoPurgeInterval
	if autoPurgeInterval <= 0 {
		autoPurgeInterval = 5
	}

	// Replace a previously running ticker, if any.
	if this.purgeTicker != nil {
		this.purgeTicker.Stop()
	}
	this.purgeTicker = utils.NewTicker(time.Duration(autoPurgeInterval) * time.Second)
	goman.New(func() {
		for this.purgeTicker.Next() {
			var tr = trackers.Begin("MEMORY_CACHE_STORAGE_PURGE_LOOP")
			this.purgeLoop()
			tr.End()
		}
	})
}

View File

@@ -0,0 +1,412 @@
package caches
import (
"bytes"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/rands"
"math/rand"
"runtime"
"runtime/debug"
"strconv"
"sync"
"testing"
"time"
)
// TestMemoryStorage_OpenWriter writes an entry, reads it back, probes a
// missing key, then rewrites the same key and reads it again.
//
// Fixes over the previous version: a duplicated `if err != nil` check after
// OpenWriter was removed, and the final reader block no longer falls through
// to dereference a nil reader when OpenReader returns ErrNotFound.
func TestMemoryStorage_OpenWriter(t *testing.T) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)

	writer, err := storage.OpenWriter("abc", time.Now().Unix()+60, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	_, _ = writer.WriteHeader([]byte("Header"))
	_, _ = writer.Write([]byte("Hello"))
	_, _ = writer.Write([]byte(", World"))
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(storage.valuesMap)

	{
		reader, err := storage.OpenReader("abc", false, false)
		if err != nil {
			if err == ErrNotFound {
				t.Log("not found: abc")
				return
			}
			t.Fatal(err)
		}

		buf := make([]byte, 1024)
		t.Log("status:", reader.Status())
		err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
			t.Log("header:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
		err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
			t.Log("body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	{
		_, err := storage.OpenReader("abc 2", false, false)
		if err != nil {
			if err == ErrNotFound {
				t.Log("not found: abc2")
			} else {
				t.Fatal(err)
			}
		}
	}

	writer, err = storage.OpenWriter("abc", time.Now().Unix()+60, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	_, _ = writer.Write([]byte("Hello123"))

	{
		reader, err := storage.OpenReader("abc", false, false)
		if err != nil {
			// Previously a "not found" here fell through and dereferenced a
			// nil reader below; return early instead.
			if err == ErrNotFound {
				t.Log("not found: abc")
				return
			}
			t.Fatal(err)
		}
		buf := make([]byte, 1024)
		err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
			t.Log("abc:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}
// TestMemoryStorage_OpenReaderLock exercises OpenReader against a directly
// populated value map to check for lock misuse.
// NOTE(review): this assigns storage.valuesMap directly — confirm that field
// still exists on the sharded storage implementation.
func TestMemoryStorage_OpenReaderLock(t *testing.T) {
	storage := NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	_ = storage.Init()

	var h = storage.hash("test")
	storage.valuesMap = map[uint64]*MemoryItem{
		h: {
			IsDone: true,
		},
	}
	_, _ = storage.OpenReader("test", false, false)
}
// TestMemoryStorage_Delete writes two entries, deletes one, and logs the map
// size after each step so shrinkage can be observed.
func TestMemoryStorage_Delete(t *testing.T) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	{
		writer, err := storage.OpenWriter("abc", time.Now().Unix()+60, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		t.Log(len(storage.valuesMap))
	}
	{
		writer, err := storage.OpenWriter("abc1", time.Now().Unix()+60, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		t.Log(len(storage.valuesMap))
	}
	_ = storage.Delete("abc1")
	t.Log(len(storage.valuesMap))
}
// TestMemoryStorage_Stat writes two entries, registers them in the list, and
// prints the resulting statistics.
func TestMemoryStorage_Stat(t *testing.T) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	expiredAt := time.Now().Unix() + 60
	{
		writer, err := storage.OpenWriter("abc", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		t.Log(len(storage.valuesMap))
		storage.AddToList(&Item{
			Key:       "abc",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	{
		writer, err := storage.OpenWriter("abc1", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		t.Log(len(storage.valuesMap))
		storage.AddToList(&Item{
			Key:       "abc1",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	stat, err := storage.Stat()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("===stat===")
	logs.PrintAsJSON(stat, t)
}
// TestMemoryStorage_CleanAll writes two entries, clears the whole cache, and
// logs the remaining list count and map size (both expected to drop to 0).
func TestMemoryStorage_CleanAll(t *testing.T) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	var expiredAt = time.Now().Unix() + 60
	{
		writer, err := storage.OpenWriter("abc", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		storage.AddToList(&Item{
			Key:       "abc",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	{
		writer, err := storage.OpenWriter("abc1", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		storage.AddToList(&Item{
			Key:       "abc1",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	err := storage.CleanAll()
	if err != nil {
		t.Fatal(err)
	}
	total, _ := storage.list.Count()
	t.Log(total, len(storage.valuesMap))
}
// TestMemoryStorage_Purge writes two entries and purges them by exact URL
// (empty urlType), then logs what is left.
func TestMemoryStorage_Purge(t *testing.T) {
	storage := NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	expiredAt := time.Now().Unix() + 60
	{
		writer, err := storage.OpenWriter("abc", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		storage.AddToList(&Item{
			Key:       "abc",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	{
		writer, err := storage.OpenWriter("abc1", expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		storage.AddToList(&Item{
			Key:       "abc1",
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}
	err := storage.Purge([]string{"abc", "abc1"}, "")
	if err != nil {
		t.Fatal(err)
	}
	total, _ := storage.list.Count()
	t.Log(total, len(storage.valuesMap))
}
// TestMemoryStorage_Expire fills the cache with randomly expiring entries
// and then waits long enough for the periodic purge to remove them.
// It only runs in single-test mode because of the 70s sleep.
func TestMemoryStorage_Expire(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{
		MemoryAutoPurgeInterval: 5,
	}, nil)
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 1000; i++ {
		expiredAt := time.Now().Unix() + int64(rands.Int(0, 60))
		key := "abc" + strconv.Itoa(i)
		writer, err := storage.OpenWriter(key, expiredAt, 200, -1, -1, -1, false)
		if err != nil {
			t.Fatal(err)
		}
		_, _ = writer.Write([]byte("Hello"))
		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
		storage.AddToList(&Item{
			Key:       key,
			BodySize:  5,
			ExpiresAt: expiredAt,
		})
	}

	// TTLs go up to 60s and the purge interval is 5s, so 70s covers both.
	time.Sleep(70 * time.Second)
}
// TestMemoryStorage_Locker checks that deleteWithoutLocker can be called
// while the caller holds the storage lock, as its contract requires.
// NOTE(review): this uses storage.locker — confirm that field still exists on
// the sharded storage implementation.
func TestMemoryStorage_Locker(t *testing.T) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	storage.locker.Lock()
	err = storage.deleteWithoutLocker("a")
	storage.locker.Unlock()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")
}
// TestMemoryStorage_Stop measures how much heap a large item map retains
// after being dropped and debug.FreeOSMemory is invoked.
func TestMemoryStorage_Stop(t *testing.T) {
	var stat1 = &runtime.MemStats{}
	runtime.ReadMemStats(stat1)

	var m = map[uint64]*MemoryItem{}
	for i := 0; i < 1_000_000; i++ {
		m[uint64(i)] = &MemoryItem{
			HeaderValue: []byte("Hello, World"),
			BodyValue:   bytes.Repeat([]byte("Hello"), 1024),
		}
	}
	m = map[uint64]*MemoryItem{}

	var before = time.Now()
	//runtime.GC()
	debug.FreeOSMemory()
	/**go func() {
		time.Sleep(10 * time.Second)
		runtime.GC()
	}()**/
	t.Log(time.Since(before).Seconds()*1000, "ms")

	var stat2 = &runtime.MemStats{}
	runtime.ReadMemStats(stat2)
	if stat2.HeapInuse > stat1.HeapInuse {
		t.Log(stat2.HeapInuse, stat1.HeapInuse, (stat2.HeapInuse-stat1.HeapInuse)/1024/1024, "MB")
	} else {
		t.Log("0 MB")
	}
	t.Log(len(m))
}
// BenchmarkValuesMap measures lookup+delete throughput on a single
// mutex-guarded map with one million entries, as a baseline for sharding.
func BenchmarkValuesMap(b *testing.B) {
	var m = map[uint64]*MemoryItem{}
	var count = 1_000_000
	for i := 0; i < count; i++ {
		m[uint64(i)] = &MemoryItem{
			ExpiresAt: time.Now().Unix(),
		}
	}
	b.Log(len(m))

	var locker = sync.Mutex{}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			locker.Lock()
			_, ok := m[uint64(rands.Int(0, 1_000_000))]
			_ = ok
			locker.Unlock()

			locker.Lock()
			delete(m, uint64(rands.Int(2, 1000000)))
			locker.Unlock()
		}
	})
}
// BenchmarkNewMemoryStorage measures parallel full write cycles (open,
// header, 1 KiB body, close) against a memory storage with random keys.
// A duplicated `if err != nil` check after OpenWriter was removed.
func BenchmarkNewMemoryStorage(b *testing.B) {
	var storage = NewMemoryStorage(&serverconfigs.HTTPCachePolicy{}, nil)
	var data = bytes.Repeat([]byte{'A'}, 1024)

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			func() {
				writer, err := storage.OpenWriter("abc"+strconv.Itoa(rand.Int()), time.Now().Unix()+60, 200, -1, -1, -1, false)
				if err != nil {
					b.Fatal(err)
				}
				_, _ = writer.WriteHeader([]byte("Header"))
				_, _ = writer.Write(data)
				err = writer.Close()
				if err != nil {
					b.Fatal(err)
				}
			}()
		}
	})
}

View File

@@ -0,0 +1,9 @@
//go:build !windows
package caches
import "syscall"
// tryLockFile places a non-blocking exclusive flock (LOCK_EX|LOCK_NB) on the
// given file descriptor; it returns an error immediately when another
// process already holds the lock.
func tryLockFile(fd int) error {
	return syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB)
}

View File

@@ -0,0 +1,30 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/configutils"
"net"
"strings"
)
// ParseHost extracts the host portion of a cache key shaped like
// "scheme://host[:port]/path". It returns "" when the key has no scheme or
// no path after the host. A port, if present, is stripped and IPv6 literals
// are re-quoted with brackets.
func ParseHost(key string) string {
	schemeIndex := strings.Index(key, "://")
	if schemeIndex <= 0 {
		return ""
	}

	var rest = key[schemeIndex+3:]
	slashIndex := strings.Index(rest, "/")
	if slashIndex <= 0 {
		return ""
	}

	var host = rest[:slashIndex]
	if hostOnly, _, err := net.SplitHostPort(host); err == nil && hostOnly != "" {
		host = configutils.QuoteIP(hostOnly)
	}
	return host
}

View File

@@ -0,0 +1,18 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import "strings"
// PartialRangesFilePath returns the companion file path that stores the
// downloaded-ranges metadata for a partial cache file.
//
// "@ranges" is inserted before the file extension ("a/b.cache" ->
// "a/b@ranges.cache"); when the file name has no extension, the suffix
// "@ranges.cache" is appended instead. Only the final path element is
// inspected, so a dot inside a directory name no longer counts as an
// extension (previously "/dir.x/file" produced "/dir@ranges.x/file").
func PartialRangesFilePath(path string) string {
	var dotIndex = strings.LastIndex(path, ".")
	// A separator after the last dot means the dot belongs to a directory
	// component, not to the file name.
	if dotIndex >= 0 && strings.ContainsAny(path[dotIndex+1:], `/\`) {
		dotIndex = -1
	}
	if dotIndex < 0 {
		return path + "@ranges.cache"
	}
	return path[:dotIndex] + "@ranges" + path[dotIndex:]
}

View File

@@ -0,0 +1,51 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/cespare/xxhash/v2"
"github.com/iwind/TeaGo/types"
"strconv"
"testing"
)
// TestParseHost prints the host extracted from a variety of cache keys,
// covering ports, IPv6 literals, query strings and malformed inputs.
func TestParseHost(t *testing.T) {
	for _, u := range []string{
		"https://goedge.cn/hello/world",
		"https://goedge.cn:8080/hello/world",
		"https://goedge.cn/hello/world?v=1&t=123",
		"https://[::1]:1234/hello/world?v=1&t=123",
		"https://[::1]/hello/world?v=1&t=123",
		"https://127.0.0.1/hello/world?v=1&t=123",
		"https:/hello/world?v=1&t=123",
		"123456",
	} {
		t.Log(u, "=>", caches.ParseHost(u))
	}
}
// TestUintString prints sample uint64-to-string conversions, including an
// xxhash digest of a URL.
func TestUintString(t *testing.T) {
	t.Log(strconv.FormatUint(xxhash.Sum64String("https://goedge.cn/"), 10))
	t.Log(strconv.FormatUint(123456789, 10))
	t.Logf("%d", 1234567890123)
}
// BenchmarkUint_String measures strconv.FormatUint for uint64 formatting.
func BenchmarkUint_String(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.FormatUint(1234567890123, 10)
	}
}
// BenchmarkUint_String2 measures TeaGo's types.String for the same conversion.
func BenchmarkUint_String2(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = types.String(1234567890123)
	}
}
// BenchmarkUint_String3 measures fmt.Sprintf for the same conversion
// (expected slowest of the three due to interface boxing and reflection).
func BenchmarkUint_String3(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", 1234567890123)
	}
}

View File

@@ -0,0 +1,34 @@
package caches
// Writer is the interface implemented by all cache content writers
// (file, memory, partial-file).
type Writer interface {
	// WriteHeader writes header data.
	WriteHeader(data []byte) (n int, err error)

	// Write writes body data.
	Write(data []byte) (n int, err error)

	// WriteAt writes body data at the given offset; implementations may
	// return a "not supported" error (FileWriter and MemoryWriter do).
	WriteAt(offset int64, data []byte) error

	// HeaderSize returns the number of header bytes written so far.
	HeaderSize() int64

	// BodySize returns the number of body bytes written so far.
	BodySize() int64

	// Close commits the written content.
	Close() error

	// Discard drops the written content.
	Discard() error

	// Key returns the cache key being written.
	Key() string

	// ExpiredAt returns the expiration timestamp (unix seconds).
	ExpiredAt() int64

	// ItemType returns the storage type of the written item.
	ItemType() ItemType
}

View File

@@ -0,0 +1,261 @@
package caches
import (
"bufio"
"encoding/binary"
"errors"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/types"
"io"
"strings"
"sync"
)
const (
	// DefaultWriteBufferSize is the size (64 KiB) of the bufio.Writer used by
	// FileWriter to reduce system calls on small writes.
	DefaultWriteBufferSize = 64 << 10
)
// FileWriter writes a cache entry (header then body) to a local file,
// buffering output through a bufio.Writer to reduce system calls.
type FileWriter struct {
	storage        StorageInterface
	rawWriter      *fsutils.File
	bufWriter      *bufio.Writer // buffers writes to rawWriter to cut down on syscalls
	key            string
	metaHeaderSize int   // header length recorded in the meta when the file was opened
	headerSize     int64 // header bytes written so far
	metaBodySize   int64 // content length known before writing (<=0 when unknown)
	bodySize       int64 // body bytes written so far
	expiredAt      int64
	maxSize        int64  // maximum allowed body size (<=0 means unlimited)
	endFunc        func() // invoked exactly once on Close/Discard (guarded by once)
	once           sync.Once
	modifiedBytes  int // NOTE(review): not referenced in the visible code — confirm before removing
}
// NewFileWriter wraps rawWriter in a buffered cache writer for key.
// endFunc fires exactly once when the writer is closed or discarded.
func NewFileWriter(storage StorageInterface, rawWriter *fsutils.File, key string, expiredAt int64, metaHeaderSize int, metaBodySize int64, maxSize int64, endFunc func()) *FileWriter {
	return &FileWriter{
		storage:        storage,
		key:            key,
		rawWriter:      rawWriter,
		expiredAt:      expiredAt,
		maxSize:        maxSize,
		endFunc:        endFunc,
		metaHeaderSize: metaHeaderSize,
		metaBodySize:   metaBodySize,
		// Buffer writes to reduce system calls.
		bufWriter: bufio.NewWriterSize(rawWriter, DefaultWriteBufferSize),
	}
}
// WriteHeader appends header bytes through the write buffer; on any write
// failure the whole entry is discarded.
func (this *FileWriter) WriteHeader(data []byte) (n int, err error) {
	n, err = this.bufWriter.Write(data)
	this.headerSize += int64(n)
	if err == nil {
		return n, nil
	}
	_ = this.Discard()
	return n, err
}
// WriteHeaderLength stores the final header length in the file's meta
// block. When the length already equals the one recorded at open time
// (metaHeaderSize), nothing needs rewriting.
func (this *FileWriter) WriteHeaderLength(headerLength int) error {
	if this.metaHeaderSize > 0 && this.metaHeaderSize == headerLength {
		return nil
	}

	// Flush buffered data before seeking so nothing lands at the wrong offset.
	err := this.bufWriter.Flush()
	if err != nil {
		_ = this.Discard()
		return err
	}

	var bytes4 = make([]byte, 4)
	binary.BigEndian.PutUint32(bytes4, uint32(headerLength))
	_, err = this.rawWriter.Seek(SizeExpiresAt+SizeStatus+SizeURLLength, io.SeekStart)
	if err != nil {
		_ = this.Discard()
		return err
	}
	// Only 4 bytes: write them on the raw file, bypassing the buffer.
	_, err = this.rawWriter.Write(bytes4)
	if err != nil {
		_ = this.Discard()
		return err
	}
	// Re-arm the buffer after the seek.
	// NOTE(review): the file offset is left just after the length field; this
	// looks safe because the only visible caller is Close, after all body
	// data has been written — confirm if new callers appear.
	this.bufWriter.Reset(this.rawWriter)
	return nil
}
// Write appends body bytes. Payloads larger than 2 MiB are fed to the
// buffered writer in 64 KiB slices; smaller ones go through in one call.
func (this *FileWriter) Write(data []byte) (n int, err error) {
	const largeThreshold = 2 << 20
	const sliceSize = 64 << 10

	var total = len(data)
	if total <= largeThreshold {
		// write NORMAL size data
		return this.write(data)
	}

	// split LARGE data
	for start := 0; start < total; start += sliceSize {
		var end = start + sliceSize
		if end > total {
			end = total
		}
		written, writeErr := this.write(data[start:end])
		n += written
		if writeErr != nil {
			return n, writeErr
		}
	}
	return n, nil
}
// WriteAt is not supported by FileWriter: content must be written
// sequentially.
func (this *FileWriter) WriteAt(offset int64, data []byte) error {
	_, _ = offset, data
	return errors.New("not supported")
}
// WriteBodyLength stores the final body length in the file's meta block.
// When the length already matches the size announced before writing
// (metaBodySize), nothing needs rewriting.
func (this *FileWriter) WriteBodyLength(bodyLength int64) error {
	if this.metaBodySize >= 0 && bodyLength == this.metaBodySize {
		return nil
	}

	// Flush buffered data before seeking so nothing lands at the wrong offset.
	err := this.bufWriter.Flush()
	if err != nil {
		_ = this.Discard()
		return err
	}

	var bytes8 = make([]byte, 8)
	binary.BigEndian.PutUint64(bytes8, uint64(bodyLength))
	_, err = this.rawWriter.Seek(SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength, io.SeekStart)
	if err != nil {
		_ = this.Discard()
		return err
	}
	// Only 8 bytes: write them on the raw file, bypassing the buffer.
	_, err = this.rawWriter.Write(bytes8)
	if err != nil {
		_ = this.Discard()
		return err
	}
	// Re-arm the buffer after the seek (see the matching note in
	// WriteHeaderLength about the resulting file offset).
	this.bufWriter.Reset(this.rawWriter)
	return nil
}
// Close finalizes the cache file: it flushes the buffer, validates the body
// length against the announced one, patches the header/body length fields in
// the meta, closes the file and renames a ".tmp" file to its final name.
// On any failure the file is removed. endFunc fires exactly once either way.
func (this *FileWriter) Close() error {
	defer this.once.Do(func() {
		this.endFunc()
	})

	var path = this.rawWriter.Name()

	// Push any still-buffered bytes to disk first.
	err := this.bufWriter.Flush()
	if err != nil {
		_ = this.rawWriter.Close()
		_ = fsutils.Remove(path)
		return err
	}

	// check content length
	if this.metaBodySize > 0 && this.bodySize != this.metaBodySize {
		_ = this.rawWriter.Close()
		_ = fsutils.Remove(path)
		return ErrUnexpectedContentLength
	}

	err = this.WriteHeaderLength(types.Int(this.headerSize))
	if err != nil {
		_ = this.rawWriter.Close()
		_ = fsutils.Remove(path)
		return err
	}
	err = this.WriteBodyLength(this.bodySize)
	if err != nil {
		_ = this.rawWriter.Close()
		_ = fsutils.Remove(path)
		return err
	}

	err = this.rawWriter.Close()
	if err != nil {
		_ = fsutils.Remove(path)
	} else if strings.HasSuffix(path, FileTmpSuffix) {
		// Promote the finished temporary file to its final path.
		err = fsutils.Rename(path, strings.Replace(path, FileTmpSuffix, "", 1))
		if err != nil {
			_ = fsutils.Remove(path)
		}
	}

	return err
}
// Discard closes the underlying file and deletes it. endFunc still fires
// exactly once.
func (this *FileWriter) Discard() error {
	defer this.once.Do(func() {
		this.endFunc()
	})

	_ = this.rawWriter.Close()
	return fsutils.Remove(this.rawWriter.Name())
}
// HeaderSize returns the number of header bytes written so far.
func (this *FileWriter) HeaderSize() int64 {
	var size = this.headerSize
	return size
}
// BodySize returns the number of body bytes written so far.
func (this *FileWriter) BodySize() int64 {
	var size = this.bodySize
	return size
}
// ExpiredAt returns the entry's expiration timestamp (unix seconds).
func (this *FileWriter) ExpiredAt() int64 {
	var ts = this.expiredAt
	return ts
}
// Key returns the cache key being written.
func (this *FileWriter) Key() string {
	var cacheKey = this.key
	return cacheKey
}
// ItemType reports the storage type of the written item: always file-backed.
func (this *FileWriter) ItemType() ItemType {
	var t = ItemTypeFile
	return t
}
// write appends one chunk to the buffered writer, accounts the bytes and
// enforces maxSize. On overflow the key is registered in the storage's
// ignore list so future writes with the same limit are rejected early, and
// the partially written file is discarded.
func (this *FileWriter) write(data []byte) (n int, err error) {
	n, err = this.bufWriter.Write(data)
	this.bodySize += int64(n)

	// Enforce the configured body size limit (maxSize <= 0 means unlimited).
	if this.maxSize > 0 && this.bodySize > this.maxSize {
		err = ErrEntityTooLarge

		if this.storage != nil {
			this.storage.IgnoreKey(this.key, this.maxSize)
		}
	}

	if err != nil {
		_ = this.Discard()
	}

	return
}

View File

@@ -0,0 +1,196 @@
package caches
import (
"errors"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/cespare/xxhash/v2"
"github.com/iwind/TeaGo/types"
"sync"
"sync/atomic"
)
// MemoryWriter accumulates one cache entry (header + body) in memory and
// publishes it to the owning MemoryStorage shard on Close.
type MemoryWriter struct {
	storage          *MemoryStorage
	key              string
	expiredAt        int64
	headerSize       int64 // header bytes written so far
	bodySize         int64 // body bytes written so far
	status           int
	isDirty          bool  // queue the finished item for flushing to the parent storage
	expectedBodySize int64 // body size announced up front (<=0 when unknown)
	maxSize          int64 // maximum allowed body size (<=0 means unlimited)
	hash             uint64
	item             *MemoryItem
	endFunc          func(valueItem *MemoryItem) // invoked exactly once on Close/Discard
	once             sync.Once
}
// NewMemoryWriter creates a writer that accumulates a cache entry in memory.
// endFunc receives the finished item exactly once when the writer ends.
func NewMemoryWriter(memoryStorage *MemoryStorage, key string, expiredAt int64, status int, isDirty bool, expectedBodySize int64, maxSize int64, endFunc func(valueItem *MemoryItem)) *MemoryWriter {
	var item = &MemoryItem{
		ExpiresAt:  expiredAt,
		ModifiedAt: fasttime.Now().Unix(),
		Status:     status,
	}
	// Reserve the body buffer up front when the final size is known.
	if expectedBodySize > 0 {
		item.BodyValue = make([]byte, 0, expectedBodySize)
	}

	return &MemoryWriter{
		storage:          memoryStorage,
		key:              key,
		expiredAt:        expiredAt,
		item:             item,
		status:           status,
		isDirty:          isDirty,
		expectedBodySize: expectedBodySize,
		maxSize:          maxSize,
		endFunc:          endFunc,
		hash:             xxhash.Sum64String(key),
	}
}
// WriteHeader appends header bytes to the in-memory item.
func (this *MemoryWriter) WriteHeader(data []byte) (n int, err error) {
	var count = len(data)
	this.item.HeaderValue = append(this.item.HeaderValue, data...)
	this.headerSize += int64(count)
	return count, nil
}
// Write appends body bytes to the in-memory item.
//
// When the item is pre-allocated (IsPrepared), data is copied into the
// buffer at WriteOffset and writing past expectedBodySize fails with
// ErrWritingUnavailable; otherwise the body slice grows by appending.
// Exceeding maxSize registers the key in the storage's ignore list.
func (this *MemoryWriter) Write(data []byte) (n int, err error) {
	var l = len(data)
	if l == 0 {
		return
	}

	if this.item.IsPrepared {
		if this.item.WriteOffset+int64(l) > this.expectedBodySize {
			err = ErrWritingUnavailable
			return
		}
		// NOTE(review): copy() assumes BodyValue already has its full length
		// set wherever IsPrepared is turned on — confirm at that site.
		copy(this.item.BodyValue[this.item.WriteOffset:], data)
		this.item.WriteOffset += int64(l)
	} else {
		this.item.BodyValue = append(this.item.BodyValue, data...)
	}

	this.bodySize += int64(l)

	// Enforce the maximum allowed body size (maxSize <= 0 means unlimited).
	if this.maxSize > 0 && this.bodySize > this.maxSize {
		err = ErrEntityTooLarge
		this.storage.IgnoreKey(this.key, this.maxSize)
		return l, err
	}
	return l, nil
}
// WriteAt is not supported by MemoryWriter: content must be written
// sequentially.
func (this *MemoryWriter) WriteAt(offset int64, b []byte) error {
	_, _ = offset, b
	return errors.New("not supported")
}
// HeaderSize returns the number of header bytes written so far.
func (this *MemoryWriter) HeaderSize() int64 {
	var size = this.headerSize
	return size
}
// BodySize returns the number of body bytes written so far.
func (this *MemoryWriter) BodySize() int64 {
	var size = this.bodySize
	return size
}
// Close publishes the finished item into its storage shard. When the writer
// is dirty and a parent storage exists, "<bodySize>@<key>" is also queued on
// dirtyChan for flushing; a full queue drops the item again and returns
// ErrWritingQueueFull. endFunc always fires exactly once, outside the lock.
func (this *MemoryWriter) Close() error {
	// Must run outside the shard lock.
	defer this.once.Do(func() {
		this.endFunc(this.item)
	})

	if this.item == nil {
		return nil
	}

	// Only the shard owning this hash is locked.
	shard := this.storage.getShard(this.hash)

	// check content length
	if this.expectedBodySize > 0 && this.bodySize != this.expectedBodySize {
		shard.locker.Lock()
		delete(shard.valuesMap, this.hash)
		shard.locker.Unlock()
		return ErrUnexpectedContentLength
	}

	shard.locker.Lock()
	this.item.IsDone = true

	var err error
	if this.isDirty {
		if this.storage.parentStorage != nil {
			shard.valuesMap[this.hash] = this.item

			// Non-blocking enqueue so a full flush queue fails fast instead
			// of stalling the writer.
			select {
			case this.storage.dirtyChan <- types.String(this.bodySize) + "@" + this.key:
				atomic.AddInt64(&this.storage.totalDirtySize, this.bodySize)
			default:
				// remove from values map
				delete(shard.valuesMap, this.hash)
				err = ErrWritingQueueFull
			}
		} else {
			shard.valuesMap[this.hash] = this.item
		}
	} else {
		shard.valuesMap[this.hash] = this.item
	}
	shard.locker.Unlock()

	return err
}
// Discard drops the partially written item from its shard. endFunc still
// fires exactly once, after the shard lock has been released.
func (this *MemoryWriter) Discard() error {
	defer this.once.Do(func() {
		this.endFunc(this.item)
	})

	var shard = this.storage.getShard(this.hash)
	shard.locker.Lock()
	defer shard.locker.Unlock()
	delete(shard.valuesMap, this.hash)
	return nil
}
// Key returns the cache key being written.
func (this *MemoryWriter) Key() string {
	var cacheKey = this.key
	return cacheKey
}
// ExpiredAt returns the entry's expiration timestamp (unix seconds).
func (this *MemoryWriter) ExpiredAt() int64 {
	var ts = this.expiredAt
	return ts
}
// ItemType reports the storage type of the written item: always memory.
func (this *MemoryWriter) ItemType() ItemType {
	var t = ItemTypeMemory
	return t
}
// calculateHash maps a cache key to its 64-bit xxhash digest.
func (this *MemoryWriter) calculateHash(key string) uint64 {
	var digest = xxhash.Sum64String(key)
	return digest
}

View File

@@ -0,0 +1,143 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"bytes"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"math/rand"
"strconv"
"testing"
"time"
)
// TestNewMemoryWriter writes 1 MiB in 16 KiB chunks through a MemoryWriter
// with a pre-announced size, then reports the elapsed time.
func TestNewMemoryWriter(t *testing.T) {
	var storage = caches.NewMemoryStorage(&serverconfigs.HTTPCachePolicy{
		Id:          0,
		IsOn:        false,
		Name:        "",
		Description: "",
		Capacity: &shared.SizeCapacity{
			Count: 8,
			Unit:  shared.SizeCapacityUnitGB,
		},
	}, nil)
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}

	const size = 1 << 20
	const chunkSize = 16 << 10
	var data = bytes.Repeat([]byte{'A'}, chunkSize)
	var before = time.Now()
	var writer = caches.NewMemoryWriter(storage, "a", time.Now().Unix()+3600, 200, false, size, 1<<30, func(valueItem *caches.MemoryItem) {
		t.Log(len(valueItem.BodyValue), "bytes")
	})
	for i := 0; i < size/chunkSize; i++ {
		_, err = writer.Write(data)
		if err != nil {
			t.Fatal(err)
		}
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("cost:", time.Since(before).Seconds()*1000, "ms")
}
// BenchmarkMemoryWriter_Capacity measures parallel 1 MiB writes with an
// announced body size, so the writer pre-allocates its buffer.
func BenchmarkMemoryWriter_Capacity(b *testing.B) {
	b.ReportAllocs()

	var storage = caches.NewMemoryStorage(&serverconfigs.HTTPCachePolicy{
		Id:          0,
		IsOn:        false,
		Name:        "",
		Description: "",
		Capacity: &shared.SizeCapacity{
			Count: 8,
			Unit:  shared.SizeCapacityUnitGB,
		},
	}, nil)
	initErr := storage.Init()
	if initErr != nil {
		b.Fatal(initErr)
	}

	const size = 1 << 20
	const chunkSize = 16 << 10
	var data = bytes.Repeat([]byte{'A'}, chunkSize)

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var writer = caches.NewMemoryWriter(storage, "a"+strconv.Itoa(rand.Int()), time.Now().Unix()+3600, 200, false, size, 1<<30, func(valueItem *caches.MemoryItem) {
			})
			for i := 0; i < size/chunkSize; i++ {
				_, err := writer.Write(data)
				if err != nil {
					b.Fatal(err)
				}
			}
			err := writer.Close()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkMemoryWriter_Capacity_Disabled is the counterpart of
// BenchmarkMemoryWriter_Capacity with an expected size of 0 passed to
// NewMemoryWriter, which disables the writer's size accounting.
func BenchmarkMemoryWriter_Capacity_Disabled(b *testing.B) {
	b.ReportAllocs()

	var storage = caches.NewMemoryStorage(&serverconfigs.HTTPCachePolicy{
		Id:          0,
		IsOn:        false,
		Name:        "",
		Description: "",
		Capacity: &shared.SizeCapacity{
			Count: 8,
			Unit:  shared.SizeCapacityUnitGB,
		},
	}, nil)
	initErr := storage.Init()
	if initErr != nil {
		b.Fatal(initErr)
	}

	const size = 1 << 20
	const chunkSize = 16 << 10
	var data = bytes.Repeat([]byte{'A'}, chunkSize)

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// note the 0 expected-size argument: capacity checks disabled
			var writer = caches.NewMemoryWriter(storage, "a"+strconv.Itoa(rand.Int()), time.Now().Unix()+3600, 200, false, 0, 1<<30, func(valueItem *caches.MemoryItem) {
			})
			for i := 0; i < size/chunkSize; i++ {
				_, err := writer.Write(data)
				if err != nil {
					b.Fatal(err)
				}
			}
			err := writer.Close()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}

View File

@@ -0,0 +1,307 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"encoding/binary"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/types"
"io"
"strings"
"sync"
)
// PartialFileWriter writes a partially cached ("ranged") item. Meta
// header/body lengths live at fixed offsets inside the cache file, while
// the set of downloaded byte ranges is tracked in a companion ranges file.
type PartialFileWriter struct {
	rawWriter      *fsutils.File  // underlying cache file
	key            string         // cache key
	metaHeaderSize int            // header length already recorded in file meta (<=0 when unknown)
	headerSize     int64          // header bytes written through this writer
	metaBodySize   int64          // body length already recorded in file meta (<=0 when unknown)
	bodySize       int64          // body bytes written (or set via SetBodyLength)
	expiredAt      int64          // unix timestamp when the cached item expires
	endFunc        func()         // callback fired exactly once, on Close or Discard
	once           sync.Once      // guards endFunc
	isNew          bool           // whether the cache file was newly created
	isPartial      bool           // whether this item is a partial (ranged) item
	bodyOffset     int64          // absolute file offset where the body starts; lazily computed in WriteAt
	ranges         *PartialRanges // bookkeeping of downloaded ranges
	rangePath      string         // path of the companion ranges file
	writtenBytes   int64          // bytes written via WriteAt since ranges were last flushed
}
// NewPartialFileWriter builds a writer for a partially cached file.
// The companion ranges file path is derived from the raw file's name.
func NewPartialFileWriter(rawWriter *fsutils.File, key string, expiredAt int64, metaHeaderSize int, metaBodySize int64, isNew bool, isPartial bool, bodyOffset int64, ranges *PartialRanges, endFunc func()) *PartialFileWriter {
	var writer = &PartialFileWriter{
		rawWriter:      rawWriter,
		key:            key,
		expiredAt:      expiredAt,
		metaHeaderSize: metaHeaderSize,
		metaBodySize:   metaBodySize,
		isNew:          isNew,
		isPartial:      isPartial,
		bodyOffset:     bodyOffset,
		ranges:         ranges,
		endFunc:        endFunc,
	}
	writer.rangePath = PartialRangesFilePath(rawWriter.Name())
	return writer
}
// WriteHeader appends header bytes to the underlying file.
// It is a no-op for files that were not newly created (an existing file
// already carries its header). On write failure the entry is discarded.
func (this *PartialFileWriter) WriteHeader(data []byte) (n int, err error) {
	if !this.isNew {
		return 0, nil
	}
	n, err = this.rawWriter.Write(data)
	this.headerSize += int64(n)
	if err != nil {
		_ = this.Discard()
	}
	return n, err
}
// AppendHeader appends extra header bytes to an already written header and
// refreshes the header length stored in the file meta. On any failure the
// whole entry is discarded.
func (this *PartialFileWriter) AppendHeader(data []byte) error {
	_, err := this.rawWriter.Write(data)
	if err != nil {
		_ = this.Discard()
		return err
	}
	this.headerSize += int64(len(data))
	err = this.WriteHeaderLength(int(this.headerSize))
	if err != nil {
		_ = this.Discard()
	}
	return err
}
// WriteHeaderLength writes the header length into the file meta at its
// fixed offset (after the expiresAt/status/URL-length fields). It is a
// no-op when the recorded meta length already matches. On failure the
// entry is discarded. Note: it seeks the raw writer and does not restore
// the previous position.
func (this *PartialFileWriter) WriteHeaderLength(headerLength int) error {
	if this.metaHeaderSize > 0 && this.metaHeaderSize == headerLength {
		return nil
	}
	var bytes4 = make([]byte, 4)
	binary.BigEndian.PutUint32(bytes4, uint32(headerLength))
	// header length is stored big-endian right after the fixed meta prefix
	_, err := this.rawWriter.Seek(SizeExpiresAt+SizeStatus+SizeURLLength, io.SeekStart)
	if err != nil {
		_ = this.Discard()
		return err
	}
	_, err = this.rawWriter.Write(bytes4)
	if err != nil {
		_ = this.Discard()
		return err
	}
	return nil
}
// Write appends body bytes to the underlying file and accumulates the body
// size. On failure the whole cache entry is discarded.
func (this *PartialFileWriter) Write(data []byte) (n int, err error) {
	n, err = this.rawWriter.Write(data)
	this.bodySize += int64(n)
	if err == nil {
		return n, nil
	}
	_ = this.Discard()
	return n, err
}
// WriteAt writes body data at the given body-relative offset and records
// the covered range. Ranges already fully covered are skipped. When writing
// far beyond the current maximum recorded offset, the file is pre-extended
// in steps sized by disk speed to avoid repeated growth.
func (this *PartialFileWriter) WriteAt(offset int64, data []byte) error {
	var c = int64(len(data))
	if c == 0 {
		return nil
	}
	var end = offset + c - 1

	// skip data that is already fully covered by recorded ranges
	if this.ranges.Contains(offset, end) {
		return nil
	}

	// prevent extending too much space in a single writing
	var maxOffset = this.ranges.Max()
	if offset-maxOffset > 16<<20 {
		var extendSizePerStep int64 = 1 << 20
		var maxExtendSize int64 = 32 << 20
		if fsutils.DiskIsExtremelyFast() {
			maxExtendSize = 128 << 20
			extendSizePerStep = 4 << 20
		} else if fsutils.DiskIsFast() {
			maxExtendSize = 64 << 20
			extendSizePerStep = 2 << 20
		}
		if offset-maxOffset > maxExtendSize {
			stat, err := this.rawWriter.Stat()
			if err != nil {
				// best-effort: a stat failure silently skips this write
				return nil
			}

			// extend min size to prepare for file tail
			if stat.Size()+extendSizePerStep <= this.bodyOffset+offset+int64(len(data)) {
				_ = this.rawWriter.Truncate(stat.Size() + extendSizePerStep)
				return nil
			}
		}
	}

	if this.bodyOffset == 0 {
		var keyLength = 0
		if this.ranges.Version == 0 { // legacy format stored the key before the body
			keyLength = len(this.key)
		}
		this.bodyOffset = SizeMeta + int64(keyLength) + this.headerSize
	}

	n, err := this.rawWriter.WriteAt(data, this.bodyOffset+offset)
	if err != nil {
		return err
	}
	this.ranges.Add(offset, end)

	// Persist the ranges file once enough new bytes have accumulated (1 MB),
	// so that other concurrent requests can promptly read the parts that are
	// already cached.
	this.writtenBytes += int64(n)
	if this.writtenBytes > (1 << 20) {
		this.writtenBytes = 0
		if len(this.rangePath) > 0 {
			if this.bodySize > 0 {
				this.ranges.BodySize = this.bodySize
			}
			_ = this.ranges.WriteToFile(this.rangePath)
		}
	}

	return nil
}
// SetBodyLength sets the total body length, overriding the size accumulated
// through Write (used when the full length is known up front).
func (this *PartialFileWriter) SetBodyLength(bodyLength int64) {
	this.bodySize = bodyLength
}
// SetContentMD5 records the content MD5 into the ranges metadata.
// Values longer than 128 bytes or containing a newline (which would break
// the ranges file format) are silently ignored.
func (this *PartialFileWriter) SetContentMD5(contentMD5 string) {
	if len(contentMD5) > 128 || strings.Contains(contentMD5, "\n") {
		return
	}
	this.ranges.ContentMD5 = contentMD5
}
// WriteBodyLength writes the body length into the file meta at its fixed
// offset (right after the header-length field). It is a no-op when the
// recorded meta length already matches. On failure the entry is discarded.
// Note: it seeks the raw writer and does not restore the previous position.
func (this *PartialFileWriter) WriteBodyLength(bodyLength int64) error {
	if this.metaBodySize > 0 && this.metaBodySize == bodyLength {
		return nil
	}
	var bytes8 = make([]byte, 8)
	binary.BigEndian.PutUint64(bytes8, uint64(bodyLength))
	// body length is stored big-endian after the header-length field
	_, err := this.rawWriter.Seek(SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength, io.SeekStart)
	if err != nil {
		_ = this.Discard()
		return err
	}
	_, err = this.rawWriter.Write(bytes8)
	if err != nil {
		_ = this.Discard()
		return err
	}
	return nil
}
// Close finalizes the cache entry: it flushes the ranges bookkeeping to the
// companion file, writes the final header/body lengths into the file meta
// (for newly created files only), and closes the raw file. On any failure
// the partially written files are removed. The end callback fires exactly
// once, even if Discard was also called.
func (this *PartialFileWriter) Close() error {
	defer this.once.Do(func() {
		this.endFunc()
	})

	if this.bodySize > 0 {
		this.ranges.BodySize = this.bodySize
	}
	err := this.ranges.WriteToFile(this.rangePath)
	if err != nil {
		_ = this.rawWriter.Close()
		this.remove()
		return err
	}

	// finalize the meta of the current writer (newly created files only)
	if this.isNew {
		err = this.WriteHeaderLength(types.Int(this.headerSize))
		if err != nil {
			_ = this.rawWriter.Close()
			this.remove()
			return err
		}
		err = this.WriteBodyLength(this.bodySize)
		if err != nil {
			_ = this.rawWriter.Close()
			this.remove()
			return err
		}
	}

	err = this.rawWriter.Close()
	if err != nil {
		this.remove()
	}

	return err
}
// Discard aborts the write: it closes the raw file, evicts and removes the
// companion ranges file, then removes the cache file itself. The end
// callback fires exactly once.
func (this *PartialFileWriter) Discard() error {
	defer this.once.Do(this.endFunc)
	_ = this.rawWriter.Close()
	SharedPartialRangesQueue.Delete(this.rangePath)
	_ = fsutils.Remove(this.rangePath)
	return fsutils.Remove(this.rawWriter.Name())
}
// HeaderSize returns the number of header bytes written so far.
func (this *PartialFileWriter) HeaderSize() int64 {
	return this.headerSize
}

// BodySize returns the body length (accumulated or explicitly set).
func (this *PartialFileWriter) BodySize() int64 {
	return this.bodySize
}

// ExpiredAt returns the expiration unix timestamp of the cached item.
func (this *PartialFileWriter) ExpiredAt() int64 {
	return this.expiredAt
}

// Key returns the cache key.
func (this *PartialFileWriter) Key() string {
	return this.key
}

// ItemType returns the storage type; partial items always live in files.
func (this *PartialFileWriter) ItemType() ItemType {
	return ItemTypeFile
}

// IsNew reports whether this entry is brand new: a freshly created file
// with no byte ranges recorded yet.
func (this *PartialFileWriter) IsNew() bool {
	return this.isNew && len(this.ranges.Ranges) == 0
}

// Ranges exposes the downloaded-ranges bookkeeping.
func (this *PartialFileWriter) Ranges() *PartialRanges {
	return this.ranges
}
// remove deletes the cache file and its companion ranges file, and evicts
// the ranges entry from the shared queue. Errors are intentionally ignored
// (best-effort cleanup).
func (this *PartialFileWriter) remove() {
	_ = fsutils.Remove(this.rawWriter.Name())
	SharedPartialRangesQueue.Delete(this.rangePath)
	_ = fsutils.Remove(this.rangePath)
}

View File

@@ -0,0 +1,51 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/types"
"os"
"testing"
"time"
)
// TestPartialFileWriter_Write writes a header plus an out-of-band body
// chunk at offset 100, then dumps the resulting cache file content.
func TestPartialFileWriter_Write(t *testing.T) {
	const path = "/tmp/test_partial.cache"
	_ = os.Remove(path)

	var dump = func() {
		content, readErr := fsutils.ReadFile(path)
		if readErr != nil {
			t.Fatal(readErr)
		}
		t.Log("["+types.String(len(content))+"]", string(content))
	}

	fp, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		t.Fatal(err)
	}

	var writer = caches.NewPartialFileWriter(fsutils.NewFile(fp, fsutils.FlagWrite), "test", time.Now().Unix()+86500, -1, -1, true, true, 0, caches.NewPartialRanges(0), func() {
		t.Log("end")
	})
	if _, err = writer.WriteHeader([]byte("header")); err != nil {
		t.Fatal(err)
	}
	// write at a non-contiguous position
	if err = writer.WriteAt(100, []byte("HELLO")); err != nil {
		t.Fatal(err)
	}
	if err = writer.Close(); err != nil {
		t.Fatal(err)
	}
	dump()
}

View File

@@ -0,0 +1,14 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Distributed under MIT license.
// See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
package cbrotli
// Inform golang build system that it should link brotli libraries.
// #cgo CFLAGS: -O2
// #cgo LDFLAGS: -lbrotlicommon
// #cgo LDFLAGS: -lbrotlidec
// #cgo LDFLAGS: -lbrotlienc
import "C"

View File

@@ -0,0 +1,163 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Distributed under MIT license.
// See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
// Package cbrotli compresses and decompresses data with C-Brotli library.
package cbrotli
/*
#include <stddef.h>
#include <stdint.h>
#include <brotli/decode.h>
static BrotliDecoderResult DecompressStream(BrotliDecoderState* s,
uint8_t* out, size_t out_len,
const uint8_t* in, size_t in_len,
size_t* bytes_written,
size_t* bytes_consumed) {
size_t in_remaining = in_len;
size_t out_remaining = out_len;
BrotliDecoderResult result = BrotliDecoderDecompressStream(
s, &in_remaining, &in, &out_remaining, &out, NULL);
*bytes_written = out_len - out_remaining;
*bytes_consumed = in_len - in_remaining;
return result;
}
*/
import "C"
import (
"bytes"
"errors"
"io"
)
// decodeError wraps a C BrotliDecoderErrorCode as a Go error.
type decodeError C.BrotliDecoderErrorCode

// Error returns the human-readable message provided by the C library,
// prefixed with the package name.
func (err decodeError) Error() string {
	return "cbrotli: " +
		C.GoString(C.BrotliDecoderErrorString(C.BrotliDecoderErrorCode(err)))
}
// Sentinel errors returned by Reader.
var errExcessiveInput = errors.New("cbrotli: excessive input") // input continues past the end of the Brotli stream
var errInvalidState = errors.New("cbrotli: invalid state")     // decoder wants more input while input is still pending
var errReaderClosed = errors.New("cbrotli: Reader is closed")
// Reader implements io.ReadCloser by reading Brotli-encoded data from an
// underlying Reader. It owns a native C decoder state, so Close MUST be
// called to release it.
type Reader struct {
	src   io.Reader
	state *C.BrotliDecoderState // native decoder; nil after Close
	buf   []byte                // scratch space for reading from src
	in    []byte                // current chunk to decode; usually aliases buf
}

// readBufSize is a "good" buffer size that avoids excessive round-trips
// between C and Go but doesn't waste too much memory on buffering.
// It is arbitrarily chosen to be equal to the constant used in io.Copy.
const readBufSize = 32 * 1024
// NewReader initializes new Reader instance.
// Close MUST be called to free resources (the native decoder state is
// allocated here and only released by Close).
func NewReader(src io.Reader) *Reader {
	return &Reader{
		src:   src,
		state: C.BrotliDecoderCreateInstance(nil, nil, nil),
		buf:   make([]byte, readBufSize),
	}
}
// Close implements io.Closer. Close MUST be invoked to free native resources.
// A second Close returns errReaderClosed without touching native memory.
func (r *Reader) Close() error {
	if r.state == nil {
		return errReaderClosed
	}
	// Close despite the state; i.e. there might be some unread decoded data.
	C.BrotliDecoderDestroyInstance(r.state)
	r.state = nil
	return nil
}
// Read decodes Brotli data into p, implementing io.Reader.
//
// It refills the input buffer from the underlying reader only when the C
// decoder has neither pending output nor pending input, then loops on the
// decoder until it produces output, finishes, or needs more input.
func (r *Reader) Read(p []byte) (n int, err error) {
	if r.state == nil {
		return 0, errReaderClosed
	}

	if int(C.BrotliDecoderHasMoreOutput(r.state)) == 0 && len(r.in) == 0 {
		m, readErr := r.src.Read(r.buf)
		if m == 0 {
			// If readErr is `nil`, we just proxy underlying stream behavior.
			return 0, readErr
		}
		r.in = r.buf[:m]
	}

	if len(p) == 0 {
		return 0, nil
	}

	for {
		var written, consumed C.size_t
		var data *C.uint8_t
		if len(r.in) != 0 {
			data = (*C.uint8_t)(&r.in[0])
		}
		result := C.DecompressStream(r.state,
			(*C.uint8_t)(&p[0]), C.size_t(len(p)),
			data, C.size_t(len(r.in)),
			&written, &consumed)
		r.in = r.in[int(consumed):]
		n = int(written)

		switch result {
		case C.BROTLI_DECODER_RESULT_SUCCESS:
			if len(r.in) > 0 {
				return n, errExcessiveInput
			}
			return n, nil
		case C.BROTLI_DECODER_RESULT_ERROR:
			return n, decodeError(C.BrotliDecoderGetErrorCode(r.state))
		case C.BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT:
			if n == 0 {
				return 0, io.ErrShortBuffer
			}
			return n, nil
		case C.BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT:
			// Fixed: this constant was previously spelled
			// C.BROTLI_DECODER_NEEDS_MORE_INPUT, which does not exist in
			// <brotli/decode.h> (the enum is BrotliDecoderResult with the
			// BROTLI_DECODER_RESULT_ prefix) and breaks cgo compilation.
		}

		if len(r.in) != 0 {
			return 0, errInvalidState
		}

		// Calling r.src.Read may block. Don't block if we have data to return.
		if n > 0 {
			return n, nil
		}

		// Top off the buffer.
		encN, err := r.src.Read(r.buf)
		if encN == 0 {
			// Not enough data to complete decoding.
			if err == io.EOF {
				return 0, io.ErrUnexpectedEOF
			}
			return 0, err
		}
		r.in = r.buf[:encN]
	}
}
// Decode decodes Brotli encoded data in one shot.
// It feeds encodedData directly as the decoder's pending input and uses an
// empty source reader so that the first refill attempt yields io.EOF.
func Decode(encodedData []byte) ([]byte, error) {
	r := &Reader{
		src:   bytes.NewReader(nil),
		state: C.BrotliDecoderCreateInstance(nil, nil, nil),
		buf:   make([]byte, 4), // arbitrarily small but nonzero so that r.src.Read returns io.EOF
		in:    encodedData,
	}
	defer r.Close()
	return io.ReadAll(r)
}

View File

@@ -0,0 +1,162 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Distributed under MIT license.
// See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
package cbrotli
/*
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <brotli/encode.h>
struct CompressStreamResult {
size_t bytes_consumed;
const uint8_t* output_data;
size_t output_data_size;
int success;
int has_more;
};
static struct CompressStreamResult CompressStream(
BrotliEncoderState* s, BrotliEncoderOperation op,
const uint8_t* data, size_t data_size) {
struct CompressStreamResult result;
size_t available_in = data_size;
const uint8_t* next_in = data;
size_t available_out = 0;
result.success = BrotliEncoderCompressStream(s, op,
&available_in, &next_in, &available_out, 0, 0) ? 1 : 0;
result.bytes_consumed = data_size - available_in;
result.output_data = 0;
result.output_data_size = 0;
if (result.success) {
result.output_data = BrotliEncoderTakeOutput(s, &result.output_data_size);
}
result.has_more = BrotliEncoderHasMoreOutput(s) ? 1 : 0;
return result;
}
*/
import "C"
import (
"bytes"
"errors"
"io"
"unsafe"
)
// WriterOptions configures Writer.
type WriterOptions struct {
	// Quality controls the compression-speed vs compression-density trade-offs.
	// The higher the quality, the slower the compression. Range is 0 to 11.
	Quality int
	// LGWin is the base 2 logarithm of the sliding window size.
	// Range is 10 to 24. 0 indicates automatic configuration based on Quality.
	LGWin int
}
// Writer implements io.WriteCloser by writing Brotli-encoded data to an
// underlying Writer. It owns a native C encoder state, so Close MUST be
// called to release it.
type Writer struct {
	dst   io.Writer
	state *C.BrotliEncoderState // native encoder; nil after Close
}

// Sentinel errors returned by Writer.
var (
	errEncode       = errors.New("cbrotli: encode error")
	errWriterClosed = errors.New("cbrotli: Writer is closed")
)
// NewWriter initializes new Writer instance.
// Close MUST be called to free resources (the native encoder state is
// allocated here and only released by Close).
func NewWriter(dst io.Writer, options WriterOptions) *Writer {
	state := C.BrotliEncoderCreateInstance(nil, nil, nil)
	C.BrotliEncoderSetParameter(
		state, C.BROTLI_PARAM_QUALITY, (C.uint32_t)(options.Quality))
	// LGWin 0 means "let the encoder choose based on quality"
	if options.LGWin > 0 {
		C.BrotliEncoderSetParameter(
			state, C.BROTLI_PARAM_LGWIN, (C.uint32_t)(options.LGWin))
	}
	return &Writer{
		dst:   dst,
		state: state,
	}
}
// writeChunk feeds p to the C encoder with the given operation
// (PROCESS/FLUSH/FINISH) and forwards any produced output to w.dst.
// It loops until the input is fully consumed and the encoder has no more
// pending output. n is the number of input bytes consumed.
func (w *Writer) writeChunk(p []byte, op C.BrotliEncoderOperation) (n int, err error) {
	if w.state == nil {
		return 0, errWriterClosed
	}
	for {
		var data *C.uint8_t
		if len(p) != 0 {
			data = (*C.uint8_t)(&p[0])
		}
		result := C.CompressStream(w.state, op, data, C.size_t(len(p)))
		if result.success == 0 {
			return n, errEncode
		}
		p = p[int(result.bytes_consumed):]
		n += int(result.bytes_consumed)

		length := int(result.output_data_size)
		if length != 0 {
			// It is a workaround for non-copying-wrapping of native memory.
			// C-encoder never pushes output block longer than ((2 << 25) + 502).
			// TODO(eustas): use natural wrapper, when it becomes available, see
			// https://golang.org/issue/13656.
			output := (*[1 << 30]byte)(unsafe.Pointer(result.output_data))[:length:length]
			_, err = w.dst.Write(output)
			if err != nil {
				return n, err
			}
		}
		if len(p) == 0 && result.has_more == 0 {
			return n, nil
		}
	}
}
// Flush outputs encoded data for all input provided to Write. The resulting
// output can be decoded to match all input before Flush, but the stream is
// not yet complete until after Close.
// Flush has a negative impact on compression.
func (w *Writer) Flush() error {
	_, err := w.writeChunk(nil, C.BROTLI_OPERATION_FLUSH)
	return err
}
// Close flushes remaining data to the decorated writer and frees C resources.
// Calling Close on an already-closed Writer returns nil.
func (w *Writer) Close() error {
	if w.state == nil {
		return nil
	}
	// If stream is already closed, it is reported by `writeChunk`.
	_, err := w.writeChunk(nil, C.BROTLI_OPERATION_FINISH)
	// C-Brotli tolerates `nil` pointer here.
	C.BrotliEncoderDestroyInstance(w.state)
	w.state = nil
	return err
}
// Write implements io.Writer. Flush or Close must be called to ensure that the
// encoded bytes are actually flushed to the underlying Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	return w.writeChunk(p, C.BROTLI_OPERATION_PROCESS)
}
// Encode returns content encoded with Brotli in one shot.
// The Write error takes precedence over the Close error; the encoded bytes
// accumulated so far are returned in either case.
func Encode(content []byte, options WriterOptions) ([]byte, error) {
	var buf bytes.Buffer
	w := NewWriter(&buf, options)
	_, writeErr := w.Write(content)
	closeErr := w.Close()
	if writeErr != nil {
		return buf.Bytes(), writeErr
	}
	return buf.Bytes(), closeErr
}

View File

@@ -0,0 +1,14 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package compressions
import "errors"
var ErrIsBusy = errors.New("the system is busy for compression")
func CanIgnore(err error) bool {
if err == nil {
return true
}
return errors.Is(err, ErrIsBusy)
}

View File

@@ -0,0 +1,16 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import "io"
// Reader is a pooled decompression reader.
// Close returns the reader to its pool after RawClose-ing the codec;
// Reset re-binds a pooled reader to a new source stream.
type Reader interface {
	// Read decompresses into p.
	Read(p []byte) (n int, err error)
	// Reset re-binds the reader to a new underlying source.
	Reset(reader io.Reader) error
	// RawClose closes the underlying codec without pool bookkeeping.
	RawClose() error
	// Close finishes the reader and returns it to its pool.
	Close() error
	// IncreaseHit increments and returns the reuse counter.
	IncreaseHit() uint32
	// SetPool attaches the owning pool.
	SetPool(pool *ReaderPool)
	// ResetFinish clears the finished flag so the reader can be reused.
	ResetFinish()
}

View File

@@ -0,0 +1,36 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import "sync/atomic"
// BaseReader carries the pooling state shared by all decompression readers.
type BaseReader struct {
	pool       *ReaderPool // owning pool; nil when the reader is not pooled
	isFinished bool        // whether Finish already ran for the current use
	hits       uint32      // reuse counter, incremented on every pool put
}
// SetPool attaches the pool this reader should be returned to on Finish.
func (this *BaseReader) SetPool(pool *ReaderPool) {
	this.pool = pool
}
// Finish closes the underlying codec via obj.RawClose and, on success,
// returns obj to the pool (if any). It is idempotent per use: a second call
// is a no-op until ResetFinish is invoked.
// NOTE(review): obj is put into the pool before isFinished is set; this is
// only safe as long as Finish is never called concurrently for the same
// reader — confirm with callers.
func (this *BaseReader) Finish(obj Reader) error {
	if this.isFinished {
		return nil
	}
	err := obj.RawClose()
	if err == nil && this.pool != nil {
		this.pool.Put(obj)
	}
	this.isFinished = true
	return err
}
// ResetFinish clears the finished flag when a pooled reader is reused.
func (this *BaseReader) ResetFinish() {
	this.isFinished = false
}

// IncreaseHit atomically increments and returns the reuse counter.
func (this *BaseReader) IncreaseHit() uint32 {
	return atomic.AddUint32(&this.hits, 1)
}

View File

@@ -0,0 +1,44 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
//go:build !plus || !linux
package compressions
import (
"github.com/andybalholm/brotli"
"io"
"strings"
)
// BrotliReader decompresses Brotli streams using the pure-Go
// andybalholm/brotli implementation (non-plus or non-linux builds).
type BrotliReader struct {
	BaseReader
	reader *brotli.Reader
}

// NewBrotliReader returns a (possibly pooled) Brotli reader bound to reader.
func NewBrotliReader(reader io.Reader) (Reader, error) {
	return sharedBrotliReaderPool.Get(reader)
}

// newBrotliReader constructs a fresh, non-pooled Brotli reader.
func newBrotliReader(reader io.Reader) (Reader, error) {
	return &BrotliReader{reader: brotli.NewReader(reader)}, nil
}
// Read decompresses into p. The brotli package reports trailing garbage
// with an error whose message contains "excessive"; callers treat that as a
// normal end of stream, so it is mapped to io.EOF here.
func (this *BrotliReader) Read(p []byte) (n int, err error) {
	n, err = this.reader.Read(p)
	if err == nil {
		return n, nil
	}
	if strings.Contains(err.Error(), "excessive") {
		return n, io.EOF
	}
	return n, err
}
// Reset re-binds the pure-Go brotli reader to a new source stream.
func (this *BrotliReader) Reset(reader io.Reader) error {
	return this.reader.Reset(reader)
}

// RawClose is a no-op: the pure-Go reader holds no native resources.
func (this *BrotliReader) RawClose() error {
	return nil
}

// Close finishes the reader and returns it to its pool.
func (this *BrotliReader) Close() error {
	return this.Finish(this)
}

View File

@@ -0,0 +1,48 @@
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
//go:build plus && linux
package compressions
import (
"github.com/TeaOSLab/EdgeNode/internal/compressions/cbrotli"
"io"
"strings"
)
// BrotliReader decompresses Brotli streams using the C-based cbrotli
// bindings (plus builds on linux).
type BrotliReader struct {
	BaseReader
	reader *cbrotli.Reader
}

// NewBrotliReader returns a (possibly pooled) Brotli reader bound to reader.
func NewBrotliReader(reader io.Reader) (Reader, error) {
	return sharedBrotliReaderPool.Get(reader)
}

// newBrotliReader constructs a fresh, non-pooled C-based Brotli reader.
func newBrotliReader(reader io.Reader) (Reader, error) {
	return &BrotliReader{reader: cbrotli.NewReader(reader)}, nil
}
// Read decompresses into p. cbrotli reports trailing garbage with an
// "excessive input" error; callers treat that as a normal end of stream,
// so it is mapped to io.EOF here.
func (this *BrotliReader) Read(p []byte) (n int, err error) {
	n, err = this.reader.Read(p)
	if err != nil && strings.Contains(err.Error(), "excessive") {
		err = io.EOF
	}
	return
}
// Reset re-binds the reader to a new source stream.
//
// The C-based reader cannot be rewound in place, so a fresh cbrotli.Reader
// is created. The previous reader is now closed first: in the normal pool
// flow it was already RawClose-d (so Close just returns an error, which is
// ignored), but if Reset is ever invoked on a still-open reader, silently
// overwriting the pointer would leak the native BrotliDecoderState.
func (this *BrotliReader) Reset(reader io.Reader) error {
	if reader == nil {
		return nil
	}
	if this.reader != nil {
		_ = this.reader.Close()
	}
	this.reader = cbrotli.NewReader(reader)
	return nil
}
// RawClose releases the native decoder state held by the C-based reader.
func (this *BrotliReader) RawClose() error {
	return this.reader.Close()
}

// Close finishes the reader and returns it to its pool.
func (this *BrotliReader) Close() error {
	return this.Finish(this)
}

View File

@@ -0,0 +1,94 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"io"
"os"
"testing"
)
// TestBrotliReader round-trips several short strings through the Brotli
// writer and reader.
func TestBrotliReader(t *testing.T) {
	for _, sample := range []string{"Hello", "World", "Ni", "Hao"} {
		t.Log("===", sample, "===")

		var compressed = &bytes.Buffer{}
		writer, err := compressions.NewBrotliWriter(compressed, 5)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = writer.Write([]byte(sample)); err != nil {
			t.Fatal(err)
		}
		if err = writer.Close(); err != nil {
			t.Fatal(err)
		}

		reader, err := compressions.NewBrotliReader(compressed)
		if err != nil {
			t.Fatal(err)
		}
		var chunk = make([]byte, 4096)
		for {
			n, readErr := reader.Read(chunk)
			if n > 0 {
				t.Log(string(chunk[:n]))
			}
			if readErr == io.EOF {
				break
			}
			if readErr != nil {
				t.Fatal(readErr)
			}
		}
		if err = reader.Close(); err != nil {
			t.Fatal(err)
		}
	}
}
// BenchmarkBrotliReader measures parallel Brotli decompression of this
// source file after compressing it once up front.
func BenchmarkBrotliReader(b *testing.B) {
	data, err := os.ReadFile("./reader_brotli.go")
	if err != nil {
		b.Fatal(err)
	}
	var buf = bytes.NewBuffer([]byte{})
	writer, err := compressions.NewBrotliWriter(buf, 5)
	if err != nil {
		b.Fatal(err)
	}
	_, err = writer.Write(data)
	if err != nil {
		// fixed: the Write error was previously ignored and then silently
		// overwritten by the Close error, which could hide a broken fixture
		b.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		b.Fatal(err)
	}
	var compressedData = buf.Bytes()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			reader, readerErr := compressions.NewBrotliReader(bytes.NewBuffer(compressedData))
			if readerErr != nil {
				b.Fatal(readerErr)
			}
			var readBuf = make([]byte, 1024)
			for {
				_, readErr := reader.Read(readBuf)
				if readErr != nil {
					if readErr != io.EOF {
						b.Fatal(readErr)
					}
					break
				}
			}
			closeErr := reader.Close()
			if closeErr != nil {
				b.Fatal(closeErr)
			}
		}
	})
}

View File

@@ -0,0 +1,39 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"compress/flate"
"io"
)
// DeflateReader decompresses DEFLATE streams via compress/flate.
type DeflateReader struct {
	BaseReader
	reader io.ReadCloser // value returned by flate.NewReader
}

// NewDeflateReader returns a (possibly pooled) deflate reader bound to reader.
func NewDeflateReader(reader io.Reader) (Reader, error) {
	return sharedDeflateReaderPool.Get(reader)
}

// newDeflateReader constructs a fresh, non-pooled deflate reader.
func newDeflateReader(reader io.Reader) (Reader, error) {
	return &DeflateReader{reader: flate.NewReader(reader)}, nil
}
// Read decompresses into p.
func (this *DeflateReader) Read(p []byte) (n int, err error) {
	return this.reader.Read(p)
}

// Reset re-binds the reader to a new source stream.
// The reader returned by flate.NewReader implements flate.Resetter, so the
// existing decompressor state is reused instead of allocating a brand-new
// one on every pooled reuse; the constructor path is kept as a fallback.
func (this *DeflateReader) Reset(reader io.Reader) error {
	if resetter, ok := this.reader.(flate.Resetter); ok {
		return resetter.Reset(reader, nil)
	}
	this.reader = flate.NewReader(reader)
	return nil
}

// RawClose closes the underlying flate reader without pool bookkeeping.
func (this *DeflateReader) RawClose() error {
	return this.reader.Close()
}

// Close finishes the reader and returns it to its pool.
func (this *DeflateReader) Close() error {
	return this.Finish(this)
}

View File

@@ -0,0 +1,51 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"io"
"testing"
)
// TestDeflateReader round-trips several short strings through the deflate
// writer and reader.
func TestDeflateReader(t *testing.T) {
	for _, sample := range []string{"Hello", "World", "Ni", "Hao"} {
		t.Log("===", sample, "===")

		var compressed = &bytes.Buffer{}
		writer, err := compressions.NewDeflateWriter(compressed, 5)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = writer.Write([]byte(sample)); err != nil {
			t.Fatal(err)
		}
		if err = writer.Close(); err != nil {
			t.Fatal(err)
		}

		reader, err := compressions.NewDeflateReader(compressed)
		if err != nil {
			t.Fatal(err)
		}
		var chunk = make([]byte, 4096)
		for {
			n, readErr := reader.Read(chunk)
			if n > 0 {
				t.Log(string(chunk[:n]))
			}
			if readErr == io.EOF {
				break
			}
			if readErr != nil {
				t.Fatal(readErr)
			}
		}
		if err = reader.Close(); err != nil {
			t.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,44 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"github.com/klauspost/compress/gzip"
"io"
)
// GzipReader decompresses gzip streams via klauspost/compress/gzip.
type GzipReader struct {
	BaseReader
	reader *gzip.Reader
}

// NewGzipReader returns a (possibly pooled) gzip reader bound to reader.
func NewGzipReader(reader io.Reader) (Reader, error) {
	return sharedGzipReaderPool.Get(reader)
}

// newGzipReader constructs a fresh, non-pooled gzip reader.
// gzip.NewReader fails when the stream does not start with a valid header.
func newGzipReader(reader io.Reader) (Reader, error) {
	r, err := gzip.NewReader(reader)
	if err != nil {
		return nil, err
	}
	return &GzipReader{
		reader: r,
	}, nil
}
// Read decompresses into p.
func (this *GzipReader) Read(p []byte) (n int, err error) {
	return this.reader.Read(p)
}

// Reset re-binds the gzip reader to a new source stream, reusing its
// internal state.
func (this *GzipReader) Reset(reader io.Reader) error {
	return this.reader.Reset(reader)
}

// RawClose closes the underlying gzip reader without pool bookkeeping.
func (this *GzipReader) RawClose() error {
	return this.reader.Close()
}

// Close finishes the reader and returns it to its pool.
func (this *GzipReader) Close() error {
	return this.Finish(this)
}

View File

@@ -0,0 +1,106 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
"io"
"strings"
"testing"
)
// TestGzipReader round-trips several short strings through the gzip writer
// and reader.
func TestGzipReader(t *testing.T) {
	for _, sample := range []string{"Hello", "World", "Ni", "Hao"} {
		t.Log("===", sample, "===")

		var compressed = &bytes.Buffer{}
		writer, err := compressions.NewGzipWriter(compressed, 5)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = writer.Write([]byte(sample)); err != nil {
			t.Fatal(err)
		}
		if err = writer.Close(); err != nil {
			t.Fatal(err)
		}

		reader, err := compressions.NewGzipReader(compressed)
		if err != nil {
			t.Fatal(err)
		}
		var chunk = make([]byte, 4096)
		for {
			n, readErr := reader.Read(chunk)
			if n > 0 {
				t.Log(string(chunk[:n]))
			}
			if readErr == io.EOF {
				break
			}
			if readErr != nil {
				t.Fatal(readErr)
			}
		}
		if err = reader.Close(); err != nil {
			t.Fatal(err)
		}
	}
}
// BenchmarkGzipReader measures sequential gzip decompression of 1 KB of
// random digit characters, re-copying the compressed bytes per iteration.
func BenchmarkGzipReader(b *testing.B) {
	// sb (not b) to avoid shadowing the *testing.B parameter
	var makeInput = func() []byte {
		var sb = strings.Builder{}
		for i := 0; i < 1024; i++ {
			sb.WriteString(types.String(rands.Int64() % 10))
		}
		return []byte(sb.String())
	}

	var compressed = &bytes.Buffer{}
	writer, err := compressions.NewGzipWriter(compressed, 5)
	if err != nil {
		b.Fatal(err)
	}
	if _, err = writer.Write(makeInput()); err != nil {
		b.Fatal(err)
	}
	if err = writer.Close(); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var input = make([]byte, compressed.Len())
		copy(input, compressed.Bytes())

		reader, readerErr := compressions.NewGzipReader(bytes.NewReader(input))
		if readerErr != nil {
			b.Fatal(readerErr)
		}
		var chunk = make([]byte, 4096)
		for {
			n, readErr := reader.Read(chunk)
			if n > 0 {
				_ = chunk[:n]
			}
			if readErr == io.EOF {
				break
			}
			if readErr != nil {
				b.Fatal(readErr)
			}
		}
		if closeErr := reader.Close(); closeErr != nil {
			b.Fatal(closeErr)
		}
	}
}

View File

@@ -0,0 +1,63 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"io"
)
// maxReadHits is the maximum number of times a single reader may be reused
// before it is retired instead of being returned to the pool.
const maxReadHits = 1 << 20

// ReaderPool recycles decompression readers through a bounded channel.
type ReaderPool struct {
	c       chan Reader                           // idle readers; bounded buffer acts as the pool
	newFunc func(reader io.Reader) (Reader, error) // constructor for fresh readers
}
// NewReaderPool creates a pool holding at most maxSize idle readers.
// Non-positive sizes fall back to a default capacity of 1024.
func NewReaderPool(maxSize int, newFunc func(reader io.Reader) (Reader, error)) *ReaderPool {
	if maxSize <= 0 {
		maxSize = 1024
	}
	var pool = &ReaderPool{newFunc: newFunc}
	pool.c = make(chan Reader, maxSize)
	return pool
}
// Get returns a pooled reader reset onto parentReader when one is idle and
// resets cleanly; otherwise (empty pool or failed reset) it builds a fresh
// reader via newFunc and attaches it to this pool.
func (this *ReaderPool) Get(parentReader io.Reader) (Reader, error) {
	select {
	case pooled := <-this.c:
		if resetErr := pooled.Reset(parentReader); resetErr == nil {
			pooled.ResetFinish()
			return pooled, nil
		}
		// reset failed: fall through and create a new reader
	default:
		// pool is empty: fall through and create a new reader
	}

	reader, err := this.newFunc(parentReader)
	if err != nil {
		return nil, err
	}
	reader.SetPool(this)
	return reader, nil
}
// Put returns a reader to the pool. Readers reused more than maxReadHits
// times are dropped, as are readers arriving while the pool is full.
func (this *ReaderPool) Put(reader Reader) {
	if reader.IncreaseHit() > maxReadHits {
		return // retire overused readers
	}
	select {
	case this.c <- reader:
	default:
		// pool is full; let the reader be garbage collected
	}
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"io"
)
// sharedBrotliReaderPool recycles Brotli readers across requests.
var sharedBrotliReaderPool *ReaderPool

// The pool is created only in the main process (teaconst.IsMain); helper
// processes skip the allocation.
func init() {
	if !teaconst.IsMain {
		return
	}
	sharedBrotliReaderPool = NewReaderPool(CalculatePoolSize(), func(reader io.Reader) (Reader, error) {
		return newBrotliReader(reader)
	})
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"io"
)
// sharedDeflateReaderPool recycles deflate readers across requests.
var sharedDeflateReaderPool *ReaderPool

// The pool is created only in the main process (teaconst.IsMain); helper
// processes skip the allocation.
func init() {
	if !teaconst.IsMain {
		return
	}
	sharedDeflateReaderPool = NewReaderPool(CalculatePoolSize(), func(reader io.Reader) (Reader, error) {
		return newDeflateReader(reader)
	})
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"io"
)
// sharedGzipReaderPool recycles gzip readers across requests.
var sharedGzipReaderPool *ReaderPool

// The pool is created only in the main process (teaconst.IsMain); helper
// processes skip the allocation.
func init() {
	if !teaconst.IsMain {
		return
	}
	sharedGzipReaderPool = NewReaderPool(CalculatePoolSize(), func(reader io.Reader) (Reader, error) {
		return newGzipReader(reader)
	})
}

View File

@@ -0,0 +1,20 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"io"
)
// sharedZSTDReaderPool recycles zstd readers across requests.
var sharedZSTDReaderPool *ReaderPool

// The pool is created only in the main process (teaconst.IsMain); helper
// processes skip the allocation.
func init() {
	if !teaconst.IsMain {
		return
	}
	sharedZSTDReaderPool = NewReaderPool(CalculatePoolSize(), func(reader io.Reader) (Reader, error) {
		return newZSTDReader(reader)
	})
}

View File

@@ -0,0 +1,45 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"github.com/klauspost/compress/zstd"
"io"
)
// ZSTDReader decompresses zstd streams via klauspost/compress/zstd.
type ZSTDReader struct {
	BaseReader
	reader *zstd.Decoder
}

// NewZSTDReader returns a (possibly pooled) zstd reader bound to reader.
func NewZSTDReader(reader io.Reader) (Reader, error) {
	return sharedZSTDReaderPool.Get(reader)
}

// newZSTDReader constructs a fresh, non-pooled zstd reader with a 256 MB
// maximum decoder window to bound memory on hostile input.
func newZSTDReader(reader io.Reader) (Reader, error) {
	r, err := zstd.NewReader(reader, zstd.WithDecoderMaxWindow(256<<20))
	if err != nil {
		return nil, err
	}
	return &ZSTDReader{
		reader: r,
	}, nil
}
// Read decompresses into p.
func (this *ZSTDReader) Read(p []byte) (n int, err error) {
	return this.reader.Read(p)
}

// Reset re-binds the zstd decoder to a new source stream.
func (this *ZSTDReader) Reset(reader io.Reader) error {
	return this.reader.Reset(reader)
}

// RawClose releases the zstd decoder; the decoder's Close has no error.
// NOTE(review): a zstd.Decoder cannot be reused after Close, so a pooled
// ZSTDReader's later Reset will fail and the pool falls back to building a
// brand-new reader — confirm whether pooling is actually intended for zstd.
func (this *ZSTDReader) RawClose() error {
	this.reader.Close()
	return nil
}

// Close finishes the reader and returns it to its pool.
func (this *ZSTDReader) Close() error {
	return this.Finish(this)
}

View File

@@ -0,0 +1,106 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
"io"
"strings"
"testing"
)
// TestZSTDReader round-trips several short strings through the zstd writer
// and reader.
func TestZSTDReader(t *testing.T) {
	for _, sample := range []string{"Hello", "World", "Ni", "Hao"} {
		t.Log("===", sample, "===")

		var compressed = &bytes.Buffer{}
		writer, err := compressions.NewZSTDWriter(compressed, 5)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = writer.Write([]byte(sample)); err != nil {
			t.Fatal(err)
		}
		if err = writer.Close(); err != nil {
			t.Fatal(err)
		}

		reader, err := compressions.NewZSTDReader(compressed)
		if err != nil {
			t.Fatal(err)
		}
		var chunk = make([]byte, 4096)
		for {
			n, readErr := reader.Read(chunk)
			if n > 0 {
				t.Log(string(chunk[:n]))
			}
			if readErr == io.EOF {
				break
			}
			if readErr != nil {
				t.Fatal(readErr)
			}
		}
		if err = reader.Close(); err != nil {
			t.Fatal(err)
		}
	}
}
// BenchmarkZSTDReader measures sequential zstd decompression of 1 KB of
// random digit characters, re-copying the compressed bytes per iteration.
func BenchmarkZSTDReader(b *testing.B) {
	var randomData = func() []byte {
		// NOTE(review): this builder shadows the *testing.B parameter `b`
		var b = strings.Builder{}
		for i := 0; i < 1024; i++ {
			b.WriteString(types.String(rands.Int64() % 10))
		}
		return []byte(b.String())
	}

	var buf = &bytes.Buffer{}
	writer, err := compressions.NewZSTDWriter(buf, 5)
	if err != nil {
		b.Fatal(err)
	}
	_, err = writer.Write(randomData())
	if err != nil {
		b.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		b.Fatal(err)
	}

	// setup above must not count toward the benchmark
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// fresh copy per iteration so each reader consumes its own buffer
		var newBytes = make([]byte, buf.Len())
		copy(newBytes, buf.Bytes())
		reader, err := compressions.NewZSTDReader(bytes.NewReader(newBytes))
		if err != nil {
			b.Fatal(err)
		}
		var data = make([]byte, 4096)
		for {
			n, err := reader.Read(data)
			if n > 0 {
				_ = data[:n]
			}
			if err != nil {
				if err == io.EOF {
					break
				}
				b.Fatal(err)
			}
		}
		err = reader.Close()
		if err != nil {
			b.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,120 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"errors"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"io"
"net/http"
"runtime"
)
// ContentEncoding names a supported HTTP Content-Encoding value.
type ContentEncoding = string

// Supported Content-Encoding values.
const (
	ContentEncodingBr      ContentEncoding = "br"
	ContentEncodingGzip    ContentEncoding = "gzip"
	ContentEncodingDeflate ContentEncoding = "deflate"
	ContentEncodingZSTD    ContentEncoding = "zstd"
)

// ErrNotSupportedContentEncoding is returned for encodings we cannot decode.
var ErrNotSupportedContentEncoding = errors.New("not supported content encoding")

// AllEncodings lists every encoding currently supported by this package.
func AllEncodings() []ContentEncoding {
	return []ContentEncoding{ContentEncodingBr, ContentEncodingGzip, ContentEncodingZSTD, ContentEncodingDeflate}
}
// NewReader wraps reader with a decompressor matching contentEncoding.
// It returns ErrNotSupportedContentEncoding for any unknown encoding.
func NewReader(reader io.Reader, contentEncoding ContentEncoding) (Reader, error) {
	switch contentEncoding {
	case ContentEncodingGzip:
		return NewGzipReader(reader)
	case ContentEncodingBr:
		return NewBrotliReader(reader)
	case ContentEncodingZSTD:
		return NewZSTDReader(reader)
	case ContentEncodingDeflate:
		return NewDeflateReader(reader)
	default:
		return nil, ErrNotSupportedContentEncoding
	}
}
// NewWriter wraps writer with a compressor for the given type and level.
// An unrecognized compression type yields an error.
func NewWriter(writer io.Writer, compressType serverconfigs.HTTPCompressionType, level int) (Writer, error) {
	switch compressType {
	case serverconfigs.HTTPCompressionTypeBrotli:
		return NewBrotliWriter(writer, level)
	case serverconfigs.HTTPCompressionTypeGzip:
		return NewGzipWriter(writer, level)
	case serverconfigs.HTTPCompressionTypeZSTD:
		return NewZSTDWriter(writer, level)
	case serverconfigs.HTTPCompressionTypeDeflate:
		return NewDeflateWriter(writer, level)
	default:
		return nil, errors.New("invalid compression type '" + compressType + "'")
	}
}
// SupportEncoding reports whether the given content encoding is one of
// the encodings this package can decode.
func SupportEncoding(encoding string) bool {
	switch encoding {
	case ContentEncodingBr, ContentEncodingGzip, ContentEncodingDeflate, ContentEncodingZSTD:
		return true
	}
	return false
}
// WrapHTTPResponse transparently decodes resp.Body when its
// Content-Encoding is supported, removing the Content-Encoding and
// Content-Length headers since the body is no longer the raw payload.
func WrapHTTPResponse(resp *http.Response) {
	if resp == nil {
		return
	}

	var encoding = resp.Header.Get("Content-Encoding")
	if encoding == "" || !SupportEncoding(encoding) {
		return
	}

	reader, err := NewReader(resp.Body, encoding)
	if err != nil {
		// leave the response untouched when a decoder cannot be created
		return
	}

	resp.Header.Del("Content-Encoding")
	resp.Header.Del("Content-Length")
	resp.Body = reader
}
// countCPU caches the number of logical CPU threads; compression levels
// scale down on smaller machines so compression does not starve serving.
var countCPU = runtime.NumCPU()

// GenerateCompressLevel picks a compression level from the available CPU
// resources: fewer cores choose the fastest (lowest) level, more cores
// allow moderately stronger compression. The result is clamped so it
// never drops below minLevel (previously a minLevel greater than the
// CPU-derived cap of 3 or 5 could be silently violated).
func GenerateCompressLevel(minLevel int, maxLevel int) (level int) {
	switch {
	case countCPU < 16:
		level = minLevel
	case countCPU < 32:
		level = min(3, maxLevel)
	default:
		level = min(5, maxLevel)
	}
	// keep the result inside the caller-provided range
	return max(level, minLevel)
}
// CalculatePoolSize derives a pool capacity from system memory:
// 32 entries per GB, falling back to 128 when memory is reported as
// zero, and capped at 2048.
func CalculatePoolSize() int {
	var size = memutils.SystemMemoryGB() * 32
	switch {
	case size == 0:
		return 128
	case size > 2048:
		return 2048
	}
	return size
}

View File

@@ -0,0 +1,27 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package compressions_test
import (
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestGenerateCompressLevel logs sample levels and checks the result
// stays inside the requested range.
func TestGenerateCompressLevel(t *testing.T) {
	var a = assert.NewAssertion(t)

	for _, args := range [][2]int{{0, 10}, {1, 10}, {1, 4}} {
		t.Log(compressions.GenerateCompressLevel(args[0], args[1]))
	}

	var level = compressions.GenerateCompressLevel(1, 2)
	t.Log(level)
	a.IsTrue(level >= 1 && level <= 2)
}
// TestCalculatePoolSize verifies the pool size lands in the range the
// implementation guarantees (128 fallback when memory is unknown,
// hard cap of 2048) instead of only logging it.
func TestCalculatePoolSize(t *testing.T) {
	var size = compressions.CalculatePoolSize()
	t.Log(size)
	if size <= 0 || size > 2048 {
		t.Fatalf("pool size %d out of expected range (0, 2048]", size)
	}
}

View File

@@ -0,0 +1,18 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import "io"
// Writer is the common interface implemented by all compressing writers
// (gzip, deflate, brotli, zstd) so they can be created, pooled and
// reused interchangeably.
type Writer interface {
	// Write compresses p toward the underlying writer.
	Write(p []byte) (int, error)

	// Flush forces any buffered data out to the underlying writer.
	Flush() error

	// Reset discards state and redirects output to writer for reuse.
	Reset(writer io.Writer)

	// RawClose closes the underlying compressor without any pool bookkeeping.
	RawClose() error

	// Close finishes the stream; pooled implementations may return the
	// writer to their pool instead of destroying it (see BaseWriter.Finish).
	Close() error

	// Level returns the compression level the writer was created with.
	Level() int

	// IncreaseHit atomically increments and returns the reuse counter.
	IncreaseHit() uint32

	// SetPool registers the pool the writer returns itself to on Close.
	SetPool(pool *WriterPool)

	// ResetFinish clears the finished flag so a pooled writer can be reused.
	ResetFinish()
}

View File

@@ -0,0 +1,39 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"sync/atomic"
)
// BaseWriter carries the pool bookkeeping shared by every compressing
// writer: the owning pool, a "finished" flag and a reuse counter.
type BaseWriter struct {
	pool       *WriterPool // pool to return to on Finish; nil when unpooled
	isFinished bool        // set once Finish has run; cleared by ResetFinish
	hits       uint32      // reuse counter, incremented atomically
}

// SetPool registers the pool this writer returns itself to when finished.
func (this *BaseWriter) SetPool(pool *WriterPool) {
	this.pool = pool
}

// Finish closes the concrete writer exactly once and, when the close
// succeeds and a pool is registered, puts the writer back for reuse.
// Subsequent calls are no-ops until ResetFinish is called.
func (this *BaseWriter) Finish(obj Writer) error {
	if this.isFinished {
		return nil
	}
	err := obj.RawClose()
	if err == nil && this.pool != nil {
		this.pool.Put(obj)
	}
	// mark finished even when the close failed, so it is not retried
	this.isFinished = true
	return err
}

// ResetFinish clears the finished flag so a pooled writer can be reused.
func (this *BaseWriter) ResetFinish() {
	this.isFinished = false
}

// IncreaseHit atomically increments and returns the reuse counter.
// NOTE(review): hits is atomic but isFinished is a plain bool — this
// assumes Finish/ResetFinish are never called concurrently for one
// writer; confirm against WriterPool usage.
func (this *BaseWriter) IncreaseHit() uint32 {
	return atomic.AddUint32(&this.hits, 1)
}

View File

@@ -0,0 +1,55 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
//go:build !plus || !linux
package compressions
import (
"github.com/andybalholm/brotli"
"io"
)
// BrotliWriter compresses written data with brotli using the pure-Go
// implementation (built under "!plus || !linux").
type BrotliWriter struct {
	BaseWriter
	writer *brotli.Writer
	level  int
}

// NewBrotliWriter returns a pooled brotli writer that outputs to writer.
// NOTE(review): the level argument is forwarded to the pool; the actual
// level of a reused writer is the one it was created with — confirm in
// WriterPool.Get.
func NewBrotliWriter(writer io.Writer, level int) (Writer, error) {
	return sharedBrotliWriterPool.Get(writer, level)
}

// newBrotliWriter builds a fresh writer; the quality level is derived
// from the CPU count via GenerateCompressLevel.
func newBrotliWriter(writer io.Writer) (*BrotliWriter, error) {
	var level = GenerateCompressLevel(brotli.BestSpeed, brotli.BestCompression)
	return &BrotliWriter{
		writer: brotli.NewWriterOptions(writer, brotli.WriterOptions{
			Quality: level,
			LGWin:   14, // TODO make this configurable through global settings
		}),
		level: level,
	}, nil
}

// Write compresses p into the underlying writer.
func (this *BrotliWriter) Write(p []byte) (int, error) {
	return this.writer.Write(p)
}

// Flush forces buffered data out to the underlying writer.
func (this *BrotliWriter) Flush() error {
	return this.writer.Flush()
}

// Reset redirects output to newWriter so the writer can be reused.
func (this *BrotliWriter) Reset(newWriter io.Writer) {
	this.writer.Reset(newWriter)
}

// RawClose closes the brotli stream without pool bookkeeping.
func (this *BrotliWriter) RawClose() error {
	return this.writer.Close()
}

// Close finishes the stream and may return the writer to its pool.
func (this *BrotliWriter) Close() error {
	return this.Finish(this)
}

// Level returns the compression level chosen at creation time.
func (this *BrotliWriter) Level() int {
	return this.level
}

View File

@@ -0,0 +1,71 @@
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
//go:build plus && linux
package compressions
import (
"github.com/TeaOSLab/EdgeNode/internal/compressions/cbrotli"
"io"
)
// Brotli quality/window constants for the cgo-based cbrotli bindings
// (built under "plus && linux").
const (
	BrotliBestSpeed       = 0
	BrotliBestCompression = 11
	BrotliDefaultLGWin    = 14
)

// BrotliWriter compresses written data with brotli via the cgo-based
// cbrotli bindings.
type BrotliWriter struct {
	BaseWriter
	writer *cbrotli.Writer
	level  int
}

// NewBrotliWriter returns a pooled brotli writer that outputs to writer.
func NewBrotliWriter(writer io.Writer, level int) (Writer, error) {
	return sharedBrotliWriterPool.Get(writer, level)
}

// newBrotliWriter builds a fresh writer; the quality level is derived
// from the CPU count via GenerateCompressLevel.
func newBrotliWriter(writer io.Writer) (*BrotliWriter, error) {
	var level = GenerateCompressLevel(BrotliBestSpeed, BrotliBestCompression)
	return &BrotliWriter{
		writer: cbrotli.NewWriter(writer, cbrotli.WriterOptions{
			Quality: level,
			LGWin:   BrotliDefaultLGWin,
		}),
		level: level,
	}, nil
}

// Write compresses p into the underlying writer.
func (this *BrotliWriter) Write(p []byte) (int, error) {
	return this.writer.Write(p)
}

// Flush forces buffered data out to the underlying writer.
func (this *BrotliWriter) Flush() error {
	return this.writer.Flush()
}

// Reset closes the current cgo writer (the close error is deliberately
// ignored) and creates a replacement over newWriter with the same level;
// unlike the pure-Go writer, cbrotli has no in-place reset.
// NOTE(review): when newWriter is nil the old writer remains closed and
// unusable — confirm callers never Reset(nil) and then keep writing.
func (this *BrotliWriter) Reset(newWriter io.Writer) {
	_ = this.writer.Close()
	if newWriter == nil {
		return
	}
	this.writer = cbrotli.NewWriter(newWriter, cbrotli.WriterOptions{
		Quality: this.level,
		LGWin:   BrotliDefaultLGWin,
	})
}

// RawClose closes the brotli stream without pool bookkeeping.
func (this *BrotliWriter) RawClose() error {
	return this.writer.Close()
}

// Close finishes the stream and may return the writer to its pool.
func (this *BrotliWriter) Close() error {
	return this.Finish(this)
}

// Level returns the compression level chosen at creation time.
func (this *BrotliWriter) Level() int {
	return this.level
}

View File

@@ -0,0 +1,152 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
stringutil "github.com/iwind/TeaGo/utils/string"
"strings"
"testing"
"time"
)
// TestBrotliWriter_LargeFile compresses ~32MB of random data in 4KB
// chunks and logs how long the full pass takes.
func TestBrotliWriter_LargeFile(t *testing.T) {
	const chunkCount = 1024 * 1024
	// pre-size: stringutil.Rand(32) appends 32 bytes per iteration,
	// so growing from nil reallocated the 32MB slice repeatedly
	var data = make([]byte, 0, chunkCount*32)
	for i := 0; i < chunkCount; i++ {
		data = append(data, stringutil.Rand(32)...)
	}
	t.Log(len(data)/1024/1024, "M")

	var before = time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()

	var buf = &bytes.Buffer{}
	writer, err := compressions.NewBrotliWriter(buf, 5)
	if err != nil {
		t.Fatal(err)
	}

	const size = 4096
	for offset := 0; offset < len(data); offset += size {
		// clamp the final chunk: the old code sliced data[offset:offset+size]
		// unconditionally and would panic if len(data) were not a multiple of size
		var end = min(offset+size, len(data))
		if _, err = writer.Write(data[offset:end]); err != nil {
			t.Fatal(err)
		}
	}

	if err = writer.Close(); err != nil {
		t.Fatal(err)
	}
}
// writeBrotli100 is the shared benchmark body: create a brotli writer
// over a throwaway buffer and write data 100 times. Errors are reported
// via b.Error, which — unlike b.Fatal/FailNow — is safe to call from the
// worker goroutines spawned by b.RunParallel.
func writeBrotli100(b *testing.B, data []byte) {
	var buf = &bytes.Buffer{}
	writer, err := compressions.NewBrotliWriter(buf, 5)
	if err != nil {
		b.Error(err)
		return
	}
	for j := 0; j < 100; j++ {
		if _, err = writer.Write(data); err != nil {
			b.Error(err)
			return
		}
	}
	_ = writer.Close()
}

// BenchmarkBrotliWriter_Write benchmarks 100 writes of a 1KB payload.
func BenchmarkBrotliWriter_Write(b *testing.B) {
	var data = []byte(strings.Repeat("A", 1024))
	for i := 0; i < b.N; i++ {
		writeBrotli100(b, data)
	}
}

// BenchmarkBrotliWriter_Write_Parallel runs the 1KB benchmark from
// multiple goroutines. The previous version called b.Fatal inside the
// RunParallel body, which is not allowed from non-test goroutines.
func BenchmarkBrotliWriter_Write_Parallel(b *testing.B) {
	var data = []byte(strings.Repeat("A", 1024))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			writeBrotli100(b, data)
		}
	})
}

// BenchmarkBrotliWriter_Write_Small benchmarks 100 writes of a 16B payload.
func BenchmarkBrotliWriter_Write_Small(b *testing.B) {
	var data = []byte(strings.Repeat("A", 16))
	for i := 0; i < b.N; i++ {
		writeBrotli100(b, data)
	}
}

// BenchmarkBrotliWriter_Write_Large benchmarks 100 writes of a 4KB payload.
func BenchmarkBrotliWriter_Write_Large(b *testing.B) {
	var data = []byte(strings.Repeat("A", 4096))
	for i := 0; i < b.N; i++ {
		writeBrotli100(b, data)
	}
}

View File

@@ -0,0 +1,57 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"compress/flate"
"io"
)
// DeflateWriter compresses written data with DEFLATE (compress/flate).
type DeflateWriter struct {
	BaseWriter
	writer *flate.Writer
	level  int
}

// NewDeflateWriter returns a pooled deflate writer that outputs to writer.
func NewDeflateWriter(writer io.Writer, level int) (Writer, error) {
	return sharedDeflateWriterPool.Get(writer, level)
}

// newDeflateWriter builds a fresh writer; the compression level is
// derived from the CPU count via GenerateCompressLevel.
func newDeflateWriter(writer io.Writer) (Writer, error) {
	var level = GenerateCompressLevel(flate.BestSpeed, flate.BestCompression)
	flateWriter, err := flate.NewWriter(writer, level)
	if err != nil {
		return nil, err
	}
	return &DeflateWriter{
		writer: flateWriter,
		level:  level,
	}, nil
}

// Write compresses p into the underlying writer.
func (this *DeflateWriter) Write(p []byte) (int, error) {
	return this.writer.Write(p)
}

// Flush forces buffered data out to the underlying writer.
func (this *DeflateWriter) Flush() error {
	return this.writer.Flush()
}

// Reset redirects output to writer so the deflate writer can be reused.
func (this *DeflateWriter) Reset(writer io.Writer) {
	this.writer.Reset(writer)
}

// RawClose closes the deflate stream without pool bookkeeping.
func (this *DeflateWriter) RawClose() error {
	return this.writer.Close()
}

// Close finishes the stream and may return the writer to its pool.
func (this *DeflateWriter) Close() error {
	return this.Finish(this)
}

// Level returns the compression level chosen at creation time.
func (this *DeflateWriter) Level() int {
	return this.level
}

View File

@@ -0,0 +1,36 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions_test
import (
"bytes"
"github.com/TeaOSLab/EdgeNode/internal/compressions"
"strings"
"testing"
)
// BenchmarkDeflateWriter_Write measures 100 writes of a 1KB payload
// per iteration through a freshly created deflate writer.
func BenchmarkDeflateWriter_Write(b *testing.B) {
	var payload = []byte(strings.Repeat("A", 1024))
	for i := 0; i < b.N; i++ {
		var buf bytes.Buffer
		writer, err := compressions.NewDeflateWriter(&buf, 5)
		if err != nil {
			b.Fatal(err)
		}
		for j := 0; j < 100; j++ {
			if _, err = writer.Write(payload); err != nil {
				b.Fatal(err)
			}
		}
		_ = writer.Close()
	}
}

View File

@@ -0,0 +1,57 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package compressions
import (
"github.com/klauspost/compress/gzip"
"io"
)
// GzipWriter compresses written data with gzip (klauspost/compress).
type GzipWriter struct {
	BaseWriter
	writer *gzip.Writer
	level  int
}

// NewGzipWriter returns a pooled gzip writer that outputs to writer.
func NewGzipWriter(writer io.Writer, level int) (Writer, error) {
	return sharedGzipWriterPool.Get(writer, level)
}

// newGzipWriter builds a fresh writer; the compression level is derived
// from the CPU count via GenerateCompressLevel.
func newGzipWriter(writer io.Writer) (Writer, error) {
	var level = GenerateCompressLevel(gzip.BestSpeed, gzip.BestCompression)
	gzipWriter, err := gzip.NewWriterLevel(writer, level)
	if err != nil {
		return nil, err
	}
	return &GzipWriter{
		writer: gzipWriter,
		level:  level,
	}, nil
}

// Write compresses p into the underlying writer.
func (this *GzipWriter) Write(p []byte) (int, error) {
	return this.writer.Write(p)
}

// Flush forces buffered data out to the underlying writer.
func (this *GzipWriter) Flush() error {
	return this.writer.Flush()
}

// Reset redirects output to writer so the gzip writer can be reused.
func (this *GzipWriter) Reset(writer io.Writer) {
	this.writer.Reset(writer)
}

// RawClose closes the gzip stream without pool bookkeeping.
func (this *GzipWriter) RawClose() error {
	return this.writer.Close()
}

// Close finishes the stream and may return the writer to its pool.
func (this *GzipWriter) Close() error {
	return this.Finish(this)
}

// Level returns the compression level chosen at creation time.
func (this *GzipWriter) Level() int {
	return this.level
}

Some files were not shown because too many files have changed in this diff Show More