Initial commit (code only without large binaries)
This commit is contained in:
39
EdgeNode/internal/utils/agents/agent.go
Normal file
39
EdgeNode/internal/utils/agents/agent.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Agent describes one known crawler/bot: how to recognize it by the
// User-Agent header (Keywords) and by the reverse-DNS (PTR) value of
// its source IP (suffixes / reg).
type Agent struct {
	Code     string   // unique agent code, e.g. "google", "baidu"
	Keywords []string // user agent keywords

	suffixes []string       // PTR suffixes
	reg      *regexp.Regexp // optional regexp matched against the PTR value
}
|
||||
|
||||
func NewAgent(code string, suffixes []string, reg *regexp.Regexp, keywords []string) *Agent {
|
||||
return &Agent{
|
||||
Code: code,
|
||||
suffixes: suffixes,
|
||||
reg: reg,
|
||||
Keywords: keywords,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *Agent) Match(ptr string) bool {
|
||||
if len(this.suffixes) > 0 {
|
||||
for _, suffix := range this.suffixes {
|
||||
if strings.HasSuffix(ptr, suffix) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if this.reg != nil {
|
||||
return this.reg.MatchString(ptr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
9
EdgeNode/internal/utils/agents/agent_ip.go
Normal file
9
EdgeNode/internal/utils/agents/agent_ip.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
// AgentIP is one resolved mapping between a client IP and the agent
// (crawler/bot) it was identified as.
type AgentIP struct {
	Id        int64  `json:"id"`        // record id; Manager tracks the highest seen id as a sync cursor
	IP        string `json:"ip"`        // client IP address
	AgentCode string `json:"agentCode"` // code of the matched agent, see AllAgents
}
|
||||
32
EdgeNode/internal/utils/agents/agents.go
Normal file
32
EdgeNode/internal/utils/agents/agents.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import "strings"
|
||||
|
||||
// AllAgents lists all built-in known agents (search engines and other
// crawlers) with their PTR suffixes and User-Agent keywords.
var AllAgents = []*Agent{
	NewAgent("baidu", []string{".baidu.com."}, nil, []string{"Baidu"}),
	NewAgent("google", []string{".googlebot.com."}, nil, []string{"Google"}),
	NewAgent("bing", []string{".search.msn.com."}, nil, []string{"bingbot"}),
	NewAgent("sogou", []string{".sogou.com."}, nil, []string{"Sogou"}),
	NewAgent("youdao", []string{".163.com."}, nil, []string{"Youdao"}),
	NewAgent("yahoo", []string{".yahoo.com."}, nil, []string{"Yahoo"}),
	NewAgent("bytedance", []string{".bytedance.com."}, nil, []string{"Bytespider"}),
	NewAgent("sm", []string{".sm.cn."}, nil, []string{"YisouSpider"}),
	NewAgent("yandex", []string{".yandex.com.", ".yndx.net."}, nil, []string{"Yandex"}),
	NewAgent("semrush", []string{".semrush.com."}, nil, []string{"SEMrush"}),
	NewAgent("facebook", []string{"facebook-waw.1-ix.net.", "facebook.b-ix.net."}, nil, []string{"facebook"}),
}
|
||||
|
||||
func IsAgentFromUserAgent(userAgent string) bool {
|
||||
for _, agent := range AllAgents {
|
||||
if len(agent.Keywords) > 0 {
|
||||
for _, keyword := range agent.Keywords {
|
||||
if strings.Contains(userAgent, keyword) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
19
EdgeNode/internal/utils/agents/agents_test.go
Normal file
19
EdgeNode/internal/utils/agents/agents_test.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/agents"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsAgentFromUserAgent(t *testing.T) {
|
||||
t.Log(agents.IsAgentFromUserAgent("Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"))
|
||||
t.Log(agents.IsAgentFromUserAgent("Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;)"))
|
||||
}
|
||||
|
||||
func BenchmarkIsAgentFromUserAgent(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
agents.IsAgentFromUserAgent("Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3 (compatible; Yaho)")
|
||||
}
|
||||
}
|
||||
9
EdgeNode/internal/utils/agents/db.go
Normal file
9
EdgeNode/internal/utils/agents/db.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
// DB is the local storage used to persist known agent IPs between restarts.
// Known implementations: SQLiteDB and KVDB.
type DB interface {
	// Init prepares the underlying storage; must be called before any other method.
	Init() error

	// InsertAgentIP persists one agent IP record.
	InsertAgentIP(ipId int64, ip string, agentCode string) error

	// ListAgentIPs reads one page of records for loading into memory.
	ListAgentIPs(offset int64, size int64) (agentIPs []*AgentIP, err error)
}
|
||||
93
EdgeNode/internal/utils/agents/db_kv.go
Normal file
93
EdgeNode/internal/utils/agents/db_kv.go
Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/kvstore"
|
||||
)
|
||||
|
||||
// KVDB stores agent IPs in the shared KV store ("agents" database,
// "agent_ips" table). It implements the DB interface.
type KVDB struct {
	table   *kvstore.Table[*AgentIP]
	encoder *AgentIPEncoder[*AgentIP]

	lastKey string // iteration cursor: key of the last item returned by ListAgentIPs
}
|
||||
|
||||
func NewKVDB() *KVDB {
|
||||
var db = &KVDB{}
|
||||
|
||||
events.OnClose(func() {
|
||||
_ = db.Close()
|
||||
})
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
func (this *KVDB) Init() error {
|
||||
store, err := kvstore.DefaultStore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
db, err := store.NewDB("agents")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
{
|
||||
this.encoder = &AgentIPEncoder[*AgentIP]{}
|
||||
table, tableErr := kvstore.NewTable[*AgentIP]("agent_ips", this.encoder)
|
||||
if tableErr != nil {
|
||||
return tableErr
|
||||
}
|
||||
db.AddTable(table)
|
||||
this.table = table
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *KVDB) InsertAgentIP(ipId int64, ip string, agentCode string) error {
|
||||
if this.table == nil {
|
||||
return errors.New("table should not be nil")
|
||||
}
|
||||
|
||||
var item = &AgentIP{
|
||||
Id: ipId,
|
||||
IP: ip,
|
||||
AgentCode: agentCode,
|
||||
}
|
||||
var key = this.encoder.EncodeKey(item)
|
||||
return this.table.Set(key, item)
|
||||
}
|
||||
|
||||
// ListAgentIPs reads up to 'size' records, resuming after the key
// returned by the previous call.
//
// NOTE(review): the 'offset' parameter is ignored — iteration resumes
// from this.lastKey instead, so this implementation is stateful and only
// supports one forward sequential scan per KVDB instance; confirm that
// callers (e.g. Manager.Load) never rely on random-access offsets.
func (this *KVDB) ListAgentIPs(offset int64, size int64) (agentIPs []*AgentIP, err error) {
	if this.table == nil {
		return nil, errors.New("table should not be nil")
	}

	err = this.table.
		Query().
		Limit(int(size)).
		Offset(this.lastKey).
		FindAll(func(tx *kvstore.Tx[*AgentIP], item kvstore.Item[*AgentIP]) (goNext bool, err error) {
			// remember the cursor so the next call continues after this item
			this.lastKey = item.Key
			agentIPs = append(agentIPs, item.Value)
			return true, nil
		})

	return
}
|
||||
|
||||
// Close releases nothing: the underlying store is shared
// (kvstore.DefaultStore) and is not owned by this instance.
func (this *KVDB) Close() error {
	return nil
}
|
||||
|
||||
func (this *KVDB) Flush() error {
|
||||
if this.table == nil {
|
||||
return errors.New("table should not be nil")
|
||||
}
|
||||
|
||||
return this.table.DB().Store().Flush()
|
||||
}
|
||||
36
EdgeNode/internal/utils/agents/db_kv_objects.go
Normal file
36
EdgeNode/internal/utils/agents/db_kv_objects.go
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// AgentIPEncoder serializes AgentIP values for the KV store:
// JSON for values, big-endian encoded id for keys.
type AgentIPEncoder[T interface{ *AgentIP }] struct {
}
|
||||
|
||||
func (this *AgentIPEncoder[T]) Encode(value T) ([]byte, error) {
|
||||
return json.Marshal(value)
|
||||
}
|
||||
|
||||
func (this *AgentIPEncoder[T]) EncodeField(value T, fieldName string) ([]byte, error) {
|
||||
return nil, errors.New("invalid field name '" + fieldName + "'")
|
||||
}
|
||||
|
||||
func (this *AgentIPEncoder[T]) Decode(valueBytes []byte) (value T, err error) {
|
||||
err = json.Unmarshal(valueBytes, &value)
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeKey generate key for ip item
//
// The key is the item id encoded as 8 big-endian bytes, so keys sort in
// id order. Negative ids are clamped to 0 — note that this mutates the
// passed item, keeping the stored value consistent with its key.
func (this *AgentIPEncoder[T]) EncodeKey(item *AgentIP) string {
	var b = make([]byte, 8)
	if item.Id < 0 {
		item.Id = 0
	}

	binary.BigEndian.PutUint64(b, uint64(item.Id))
	return string(b)
}
|
||||
53
EdgeNode/internal/utils/agents/db_kv_test.go
Normal file
53
EdgeNode/internal/utils/agents/db_kv_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/agents"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestKVDB_InsertAgentIP(t *testing.T) {
|
||||
var db = agents.NewKVDB()
|
||||
err := db.Init()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = db.Flush()
|
||||
}()
|
||||
|
||||
for i := 1; i <= 5; i++ {
|
||||
err = db.InsertAgentIP(int64(i), "192.168.2."+strconv.Itoa(i), "example")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVDB_ListAgentIPs(t *testing.T) {
|
||||
var db = agents.NewKVDB()
|
||||
err := db.Init()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
const count = 10
|
||||
|
||||
for {
|
||||
agentIPs, listErr := db.ListAgentIPs(0, count)
|
||||
if listErr != nil {
|
||||
t.Fatal(listErr)
|
||||
}
|
||||
t.Log("===")
|
||||
for _, agentIP := range agentIPs {
|
||||
t.Logf("%+v", agentIP)
|
||||
}
|
||||
|
||||
if len(agentIPs) < count {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
160
EdgeNode/internal/utils/agents/db_sqlite.go
Normal file
160
EdgeNode/internal/utils/agents/db_sqlite.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// tableAgentIPs is the SQLite table holding synced agent IP records.
	tableAgentIPs = "agentIPs"
)
|
||||
|
||||
// SQLiteDB stores agent IPs in a local SQLite file. It implements the
// DB interface; Manager.loadDB keeps using it when the file already
// exists (legacy installations).
type SQLiteDB struct {
	db   *dbs.DB
	path string // filesystem path of the SQLite database file

	// prepared statements, created in Init
	insertAgentIPStmt *dbs.Stmt
	listAgentIPsStmt  *dbs.Stmt
}
|
||||
|
||||
func NewSQLiteDB(path string) *SQLiteDB {
|
||||
var db = &SQLiteDB{path: path}
|
||||
|
||||
events.OnClose(func() {
|
||||
_ = db.Close()
|
||||
})
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
// Init opens (creating if needed) the SQLite database, creates the agent
// IPs table and prepares the statements used by the other methods.
func (this *SQLiteDB) Init() error {
	// make sure the parent directory exists
	var dir = filepath.Dir(this.path)

	_, err := os.Stat(dir)
	if err != nil {
		err = os.MkdirAll(dir, 0777)
		if err != nil {
			return err
		}
		remotelogs.Println("DB", "create database dir '"+dir+"'")
	}

	// TODO think about data safety of data.db
	db, err := dbs.OpenWriter("file:" + this.path + "?cache=shared&mode=rwc&_journal_mode=WAL&_locking_mode=EXCLUSIVE")
	if err != nil {
		return err
	}
	db.SetMaxOpenConns(1) // single connection: opened in EXCLUSIVE locking mode

	/**_, err = db.Exec("VACUUM")
	if err != nil {
		return err
	}**/

	_, err = db.Exec(`CREATE TABLE IF NOT EXISTS "` + tableAgentIPs + `" (
		"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
		"ip" varchar(64),
		"agentCode" varchar(128)
	);`)
	if err != nil {
		return err
	}

	// prepared statements

	// agent ip record statements
	this.insertAgentIPStmt, err = db.Prepare(`INSERT INTO "` + tableAgentIPs + `" ("id", "ip", "agentCode") VALUES (?, ?, ?)`)
	if err != nil {
		return err
	}

	this.listAgentIPsStmt, err = db.Prepare(`SELECT "id", "ip", "agentCode" FROM "` + tableAgentIPs + `" ORDER BY "id" ASC LIMIT ? OFFSET ?`)
	if err != nil {
		return err
	}

	this.db = db

	return nil
}
|
||||
|
||||
func (this *SQLiteDB) InsertAgentIP(ipId int64, ip string, agentCode string) error {
|
||||
if this.db == nil {
|
||||
return errors.New("db should not be nil")
|
||||
}
|
||||
|
||||
_, err := this.insertAgentIPStmt.Exec(ipId, ip, agentCode)
|
||||
if err != nil {
|
||||
// 不提示ID重复错误
|
||||
if strings.Contains(err.Error(), "UNIQUE constraint") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *SQLiteDB) ListAgentIPs(offset int64, size int64) (agentIPs []*AgentIP, err error) {
|
||||
if this.db == nil {
|
||||
return nil, errors.New("db should not be nil")
|
||||
}
|
||||
rows, err := this.listAgentIPsStmt.Query(size, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
_ = rows.Close()
|
||||
}()
|
||||
for rows.Next() {
|
||||
var agentIP = &AgentIP{}
|
||||
err = rows.Scan(&agentIP.Id, &agentIP.IP, &agentIP.AgentCode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
agentIPs = append(agentIPs, agentIP)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (this *SQLiteDB) Close() error {
|
||||
if this.db == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, stmt := range []*dbs.Stmt{
|
||||
this.insertAgentIPStmt,
|
||||
this.listAgentIPsStmt,
|
||||
} {
|
||||
if stmt != nil {
|
||||
_ = stmt.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return this.db.Close()
|
||||
}
|
||||
|
||||
// 打印日志
|
||||
func (this *SQLiteDB) log(args ...any) {
|
||||
if !Tea.IsTesting() {
|
||||
return
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
args[0] = "[" + types.String(args[0]) + "]"
|
||||
log.Println(args...)
|
||||
}
|
||||
54
EdgeNode/internal/utils/agents/ip_cache_map.go
Normal file
54
EdgeNode/internal/utils/agents/ip_cache_map.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// IPCacheMap is a fixed-capacity set of IP strings with FIFO eviction:
// once full, adding a new IP drops the oldest inserted one.
// Safe for concurrent use.
type IPCacheMap struct {
	m    map[string]zero.Zero // membership set
	list []string             // insertion order, used for FIFO eviction

	locker sync.RWMutex // guards m and list
	maxLen int          // maximum number of IPs kept
}
|
||||
|
||||
func NewIPCacheMap(maxLen int) *IPCacheMap {
|
||||
if maxLen <= 0 {
|
||||
maxLen = 65535
|
||||
}
|
||||
return &IPCacheMap{
|
||||
m: map[string]zero.Zero{},
|
||||
maxLen: maxLen,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *IPCacheMap) Add(ip string) {
|
||||
this.locker.Lock()
|
||||
defer this.locker.Unlock()
|
||||
|
||||
// 是否已经存在
|
||||
_, ok := this.m[ip]
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
// 超出长度删除第一个
|
||||
if len(this.list) >= this.maxLen {
|
||||
delete(this.m, this.list[0])
|
||||
this.list = this.list[1:]
|
||||
}
|
||||
|
||||
// 加入新数据
|
||||
this.m[ip] = zero.Zero{}
|
||||
this.list = append(this.list, ip)
|
||||
}
|
||||
|
||||
func (this *IPCacheMap) Contains(ip string) bool {
|
||||
this.locker.RLock()
|
||||
defer this.locker.RUnlock()
|
||||
_, ok := this.m[ip]
|
||||
return ok
|
||||
}
|
||||
33
EdgeNode/internal/utils/agents/ip_cache_map_test.go
Normal file
33
EdgeNode/internal/utils/agents/ip_cache_map_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewIPCacheMap(t *testing.T) {
|
||||
var cacheMap = NewIPCacheMap(3)
|
||||
|
||||
t.Log("====")
|
||||
cacheMap.Add("1")
|
||||
cacheMap.Add("2")
|
||||
logs.PrintAsJSON(cacheMap.m, t)
|
||||
logs.PrintAsJSON(cacheMap.list, t)
|
||||
|
||||
t.Log("====")
|
||||
cacheMap.Add("3")
|
||||
logs.PrintAsJSON(cacheMap.m, t)
|
||||
logs.PrintAsJSON(cacheMap.list, t)
|
||||
|
||||
t.Log("====")
|
||||
cacheMap.Add("4")
|
||||
logs.PrintAsJSON(cacheMap.m, t)
|
||||
logs.PrintAsJSON(cacheMap.list, t)
|
||||
|
||||
t.Log("====")
|
||||
cacheMap.Add("3")
|
||||
logs.PrintAsJSON(cacheMap.m, t)
|
||||
logs.PrintAsJSON(cacheMap.list, t)
|
||||
}
|
||||
213
EdgeNode/internal/utils/agents/manager.go
Normal file
213
EdgeNode/internal/utils/agents/manager.go
Normal file
@@ -0,0 +1,213 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/rpc"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SharedManager is the process-wide agent IP manager.
var SharedManager = NewManager()
|
||||
|
||||
func init() {
	// only the main node process runs the background sync
	if !teaconst.IsMain {
		return
	}

	// start the manager once the node configuration has been loaded
	events.On(events.EventLoaded, func() {
		goman.New(func() {
			SharedManager.Start()
		})
	})
}
|
||||
|
||||
// Manager keeps the in-memory ip => agentCode mapping, persists it via a
// DB implementation and periodically syncs new records from the API.
type Manager struct {
	ipMap  map[string]string // ip => agentCode
	locker sync.RWMutex      // guards ipMap

	db DB // local persistence backend, set by loadDB or SetDB

	lastId int64 // highest record id seen; used as a sync cursor by Loop
}
|
||||
|
||||
func NewManager() *Manager {
|
||||
return &Manager{
|
||||
ipMap: map[string]string{},
|
||||
}
|
||||
}
|
||||
|
||||
// SetDB replaces the persistence backend (used by tests; production code
// lets Start pick one via loadDB).
func (this *Manager) SetDB(db DB) {
	this.db = db
}
|
||||
|
||||
// Start runs the sync loop: open the local database, load persisted
// records, fetch the latest agent IPs from the API, then refresh
// periodically. It blocks forever and is meant to run in its own
// goroutine (see init).
func (this *Manager) Start() {
	remotelogs.Println("AGENT_MANAGER", "starting ...")

	err := this.loadDB()
	if err != nil {
		remotelogs.Error("AGENT_MANAGER", "load database failed: "+err.Error())
		return
	}

	// load from the local database
	err = this.Load()
	if err != nil {
		remotelogs.Error("AGENT_MANAGER", "load failed: "+err.Error())
	}

	// fetch from the API first
	err = this.LoopAll()
	if err != nil {
		// connection errors are downgraded: the API may simply be unreachable
		if rpc.IsConnError(err) {
			remotelogs.Debug("AGENT_MANAGER", "retrieve latest agent ip failed: "+err.Error())
		} else {
			remotelogs.Error("AGENT_MANAGER", "retrieve latest agent ip failed: "+err.Error())
		}
	}

	// then fetch periodically
	var duration = 30 * time.Minute
	if Tea.IsTesting() {
		duration = 30 * time.Second // shorter interval while testing
	}
	var ticker = time.NewTicker(duration)
	for range ticker.C {
		err = this.LoopAll()
		if err != nil {
			remotelogs.Error("AGENT_MANAGER", "retrieve latest agent ip failed: "+err.Error())
		}
	}
}
|
||||
|
||||
func (this *Manager) Load() error {
|
||||
var offset int64 = 0
|
||||
var size int64 = 10000
|
||||
for {
|
||||
agentIPs, err := this.db.ListAgentIPs(offset, size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(agentIPs) == 0 {
|
||||
break
|
||||
}
|
||||
for _, agentIP := range agentIPs {
|
||||
this.locker.Lock()
|
||||
this.ipMap[agentIP.IP] = agentIP.AgentCode
|
||||
this.locker.Unlock()
|
||||
|
||||
if agentIP.Id > this.lastId {
|
||||
this.lastId = agentIP.Id
|
||||
}
|
||||
}
|
||||
offset += size
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Manager) LoopAll() error {
|
||||
for {
|
||||
hasNext, err := this.Loop()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !hasNext {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Loop fetches one page (up to 10000 records) of agent IPs created after
// lastId from the API, persists them and adds them to the in-memory map.
// hasNext is false when the API returned no more data.
func (this *Manager) Loop() (hasNext bool, err error) {
	rpcClient, err := rpc.SharedRPC()
	if err != nil {
		return false, err
	}
	ipsResp, err := rpcClient.ClientAgentIPRPC.ListClientAgentIPsAfterId(rpcClient.Context(), &pb.ListClientAgentIPsAfterIdRequest{
		Id:   this.lastId,
		Size: 10000,
	})
	if err != nil {
		return false, err
	}
	if len(ipsResp.ClientAgentIPs) == 0 {
		return false, nil
	}
	for _, agentIP := range ipsResp.ClientAgentIPs {
		if agentIP.ClientAgent == nil {
			// advance the cursor even without agent info
			if agentIP.Id > this.lastId {
				this.lastId = agentIP.Id
			}

			continue
		}

		// persist to the local database
		err = this.db.InsertAgentIP(agentIP.Id, agentIP.Ip, agentIP.ClientAgent.Code)
		if err != nil {
			return false, err
		}

		// add to the in-memory map
		this.locker.Lock()
		this.ipMap[agentIP.Ip] = agentIP.ClientAgent.Code
		this.locker.Unlock()

		// advance the cursor
		if agentIP.Id > this.lastId {
			this.lastId = agentIP.Id
		}
	}

	return true, nil
}
|
||||
|
||||
// AddIP 添加记录
|
||||
func (this *Manager) AddIP(ip string, agentCode string) {
|
||||
this.locker.Lock()
|
||||
this.ipMap[ip] = agentCode
|
||||
this.locker.Unlock()
|
||||
}
|
||||
|
||||
// LookupIP 查询IP所属Agent
|
||||
func (this *Manager) LookupIP(ip string) (agentCode string) {
|
||||
this.locker.RLock()
|
||||
defer this.locker.RUnlock()
|
||||
return this.ipMap[ip]
|
||||
}
|
||||
|
||||
// ContainsIP 检查是否有IP相关数据
|
||||
func (this *Manager) ContainsIP(ip string) bool {
|
||||
this.locker.RLock()
|
||||
defer this.locker.RUnlock()
|
||||
_, ok := this.ipMap[ip]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (this *Manager) loadDB() error {
|
||||
var sqlitePath = Tea.Root + "/data/agents.db"
|
||||
_, sqliteErr := os.Stat(sqlitePath)
|
||||
var db DB
|
||||
if sqliteErr == nil || !teaconst.EnableKVCacheStore {
|
||||
db = NewSQLiteDB(sqlitePath)
|
||||
} else {
|
||||
db = NewKVDB()
|
||||
}
|
||||
err := db.Init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.db = db
|
||||
return nil
|
||||
}
|
||||
40
EdgeNode/internal/utils/agents/manager_test.go
Normal file
40
EdgeNode/internal/utils/agents/manager_test.go
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/agents"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
_ "github.com/iwind/TeaGo/bootstrap"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewManager(t *testing.T) {
|
||||
if !testutils.IsSingleTesting() {
|
||||
return
|
||||
}
|
||||
|
||||
var db = agents.NewSQLiteDB(Tea.Root + "/data/agents.db")
|
||||
err := db.Init()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var manager = agents.NewManager()
|
||||
manager.SetDB(db)
|
||||
err = manager.Load()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = manager.Loop()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(manager.LookupIP("192.168.3.100")) // not found
|
||||
t.Log(manager.LookupIP("66.249.79.25")) // google
|
||||
t.Log(manager.ContainsIP("66.249.79.25")) // true
|
||||
t.Log(manager.ContainsIP("66.249.79.255")) // not found
|
||||
}
|
||||
138
EdgeNode/internal/utils/agents/queue.go
Normal file
138
EdgeNode/internal/utils/agents/queue.go
Normal file
@@ -0,0 +1,138 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/rpc"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
"net"
|
||||
)
|
||||
|
||||
func init() {
	// only the main node process resolves agent PTR records
	if !teaconst.IsMain {
		return
	}

	// start the queue worker once the node configuration has been loaded
	events.On(events.EventLoaded, func() {
		goman.New(func() {
			SharedQueue.Start()
		})
	})
}
||||
|
||||
// SharedQueue is the process-wide PTR resolving queue.
var SharedQueue = NewQueue()

// Queue asynchronously resolves client IPs to their PTR records to find
// out which agent (crawler) they belong to.
type Queue struct {
	c        chan string // chan ip
	cacheMap *IPCacheMap // recently seen IPs, to avoid duplicate work
}
|
||||
|
||||
func NewQueue() *Queue {
|
||||
return &Queue{
|
||||
c: make(chan string, 128),
|
||||
cacheMap: NewIPCacheMap(65535),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *Queue) Start() {
|
||||
for ip := range this.c {
|
||||
err := this.Process(ip)
|
||||
if err != nil {
|
||||
// 不需要上报错误
|
||||
if Tea.IsTesting() {
|
||||
remotelogs.Debug("SharedParseQueue", err.Error())
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push queues an IP for PTR resolution; recently seen IPs are skipped.
//
// NOTE(review): the IP is added to cacheMap before the non-blocking send,
// so when the channel is full the IP is dropped AND remembered — it will
// not be retried until evicted from the cache. Confirm this is intended.
func (this *Queue) Push(ip string) {
	// already queued or processed recently?
	if this.cacheMap.Contains(ip) {
		return
	}
	this.cacheMap.Add(ip)

	// enqueue without blocking; drop the IP when the queue is full
	select {
	case this.c <- ip:
	default:
	}
}
|
||||
|
||||
// Process resolves the PTR record of the IP, matches it against the
// known agents, and on a positive match records it locally and reports
// it to the API.
func (this *Queue) Process(ip string) error {
	// already known?
	if SharedManager.ContainsIP(ip) {
		return nil
	}

	ptr, err := this.ParseIP(ip)
	if err != nil {
		return err
	}
	if len(ptr) == 0 || ptr == "." {
		// no usable PTR record
		return nil
	}

	//remotelogs.Debug("AGENT", ip+" => "+ptr)

	var agentCode = this.ParsePtr(ptr)
	if len(agentCode) == 0 {
		return nil
	}

	// record locally
	SharedManager.AddIP(ip, agentCode)

	// report to the API
	var pbAgentIP = &pb.CreateClientAgentIPsRequest_AgentIPInfo{
		AgentCode: agentCode,
		Ip:        ip,
		Ptr:       ptr,
	}
	rpcClient, err := rpc.SharedRPC()
	if err != nil {
		return err
	}
	_, err = rpcClient.ClientAgentIPRPC.CreateClientAgentIPs(rpcClient.Context(), &pb.CreateClientAgentIPsRequest{AgentIPs: []*pb.CreateClientAgentIPsRequest_AgentIPInfo{pbAgentIP}})
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// ParseIP 分析IP的PTR值
|
||||
func (this *Queue) ParseIP(ip string) (ptr string, err error) {
|
||||
if len(ip) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
names, err := net.LookupAddr(ip)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(names) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return names[0], nil
|
||||
}
|
||||
|
||||
// ParsePtr 分析PTR对应的Agent
|
||||
func (this *Queue) ParsePtr(ptr string) (agentCode string) {
|
||||
for _, agent := range AllAgents {
|
||||
if agent.Match(ptr) {
|
||||
return agent.Code
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
87
EdgeNode/internal/utils/agents/queue_test.go
Normal file
87
EdgeNode/internal/utils/agents/queue_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package agents_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/agents"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
_ "github.com/iwind/TeaGo/bootstrap"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestParseQueue_Process pushes a sample IP through a running queue.
// Only runs in single-test mode because it performs DNS and RPC calls.
func TestParseQueue_Process(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var queue = agents.NewQueue()
	go queue.Start()
	time.Sleep(1 * time.Second) // give the worker goroutine time to start
	queue.Push("220.181.13.100")
	time.Sleep(1 * time.Second) // wait for async processing
}
|
||||
|
||||
// TestParseQueue_ParseIP resolves PTR records for a mix of private and
// public IPs. Only runs in single-test mode because it performs DNS lookups.
func TestParseQueue_ParseIP(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var queue = agents.NewQueue()
	for _, ip := range []string{
		"192.168.1.100",
		"42.120.160.1",
		"42.236.10.98",
		"124.115.0.100",
		"185.1.213.197",
	} {
		ptr, err := queue.ParseIP(ip)
		if err != nil {
			// unresolvable IPs are expected; just log them
			t.Log(ip, "=>", err)
			continue
		}
		t.Log(ip, "=>", ptr)
	}
}
|
||||
|
||||
// TestParseQueue_ParsePtr checks the PTR => agent code mapping with one
// sample PTR value per known agent.
func TestParseQueue_ParsePtr(t *testing.T) {
	var a = assert.NewAssertion(t)

	var queue = agents.NewQueue()
	for _, s := range [][]string{
		// {ptr value, expected agent code}
		{"baiduspider-220-181-108-101.crawl.baidu.com.", "baidu"},
		{"crawl-66-249-71-219.googlebot.com.", "google"},
		{"msnbot-40-77-167-31.search.msn.com.", "bing"},
		{"sogouspider-49-7-20-129.crawl.sogou.com.", "sogou"},
		{"m13102.mail.163.com.", "youdao"},
		{"yeurosport.pat1.tc2.yahoo.com.", "yahoo"},
		{"shenmaspider-42-120-160-1.crawl.sm.cn.", "sm"},
		{"93-158-161-39.spider.yandex.com.", "yandex"},
		{"25.bl.bot.semrush.com.", "semrush"},
		{"facebook-waw.1-ix.net.", "facebook"},
	} {
		a.IsTrue(queue.ParsePtr(s[0]) == s[1])
	}
}
|
||||
|
||||
// BenchmarkQueue_ParsePtr measures PTR matching across one sample PTR
// value per agent.
func BenchmarkQueue_ParsePtr(b *testing.B) {
	var queue = agents.NewQueue()

	for i := 0; i < b.N; i++ {
		for _, s := range [][]string{
			{"baiduspider-220-181-108-101.crawl.baidu.com.", "baidu"},
			{"crawl-66-249-71-219.googlebot.com.", "google"},
			{"msnbot-40-77-167-31.search.msn.com.", "bing"},
			{"sogouspider-49-7-20-129.crawl.sogou.com.", "sogou"},
			{"m13102.mail.163.com.", "youdao"},
			{"yeurosport.pat1.tc2.yahoo.com.", "yahoo"},
			{"shenmaspider-42-120-160-1.crawl.sm.cn.", "sm"},
			{"93-158-161-39.spider.yandex.com.", "yandex"},
			{"93.158.164.218-red.dhcp.yndx.net.", "yandex"},
			{"25.bl.bot.semrush.com.", "semrush"},
		} {
			queue.ParsePtr(s[0])
		}
	}
}
|
||||
87
EdgeNode/internal/utils/bfs/DESIGN.md
Normal file
87
EdgeNode/internal/utils/bfs/DESIGN.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# BFS设计
|
||||
|
||||
## TODO
|
||||
|
||||
* block对指定内容可以压缩:比如text/html, text/plain
|
||||
* block offset信息可以自动合并
|
||||
* 实现bfs/下的所有TODO
|
||||
* 系统退出(quit/terminate/kill)时自动关闭文件
|
||||
* compact的时候同时compact .b和.m两个文件
|
||||
* 实现对可缓存文件尺寸的限制
|
||||
* 提前为文件扩展出空间: mFile, bFile
|
||||
* FileReader可以重用:使用完之后放入Pool,但要考虑到数据可能已经变更
|
||||
* 读的时候不允许修改相应区域
|
||||
* 在compact的时候尤其注意不修改正在Read的区域
|
||||
* 记录写入和读取速度,然后下次启动的时候根据写入和读取速度调整相关参数
|
||||
* 可以实现缓存数据加密功能
|
||||
* 校验mFile:可以在文件末尾写入一个特殊标记?比如$$$END$$$,下次读取的时候检查此标记是否仍然存在?可能导致写入性能较低
|
||||
* IMPORTANT
|
||||
* 实现空余空间重复利用:需要保证此块区域没有正在被读
|
||||
* 策略:单个文件内容写入时,先写入最大的Gap,写满之后,再写入到尾部,防止太过零碎
|
||||
* delete file的时候记录空闲blocks:freeBlocks
|
||||
* 再次被使用的时候减去空闲blocks
|
||||
* 实现bFile和mFile的compact、定时器
|
||||
* bFile和mFile的corruption检测
|
||||
* 增加End Block?
|
||||
* 增加 openWriter options
|
||||
* 增加 openReader options
|
||||
* 在 MetaFile 中实现 HeaderBlocks和BodyBlocks 合并操作
|
||||
* 考虑 BlocksFile.Close()中是否要sync(),还是简单的close即可?这需要corruption检测支持
|
||||
* fs.BFilesMap分区管理,减少锁
|
||||
* 思考把打开BFile和关闭BFile移出锁
|
||||
* 完全避免 check status failed: the file closed
|
||||
* 增加重试功能?
|
||||
* limiter使用fsutils.Limiter
|
||||
|
||||
|
||||
|
||||
## 参考文档
|
||||
|
||||
* [CockroachDB's Storage Layer](https://www.cockroachlabs.com/docs/stable/architecture/storage-layer)
|
||||
|
||||
## 设计目标
|
||||
|
||||
1亿个文件,20TiB文件内容。
|
||||
|
||||
## 目录结构
|
||||
|
||||
~~~
|
||||
00/
|
||||
a.b - 文件内容
|
||||
a.m - 元数据
|
||||
01/
|
||||
...
|
||||
~~~
|
||||
|
||||
## 数据结构
|
||||
|
||||
文件内容:
|
||||
|
||||
~~~
|
||||
block1, block2, ...
|
||||
~~~
|
||||
|
||||
元数据:
|
||||
|
||||
~~~
|
||||
hash
|
||||
modifiedAt
|
||||
expiresAt
|
||||
status
|
||||
fileSize
|
||||
headerSize
|
||||
bodySize
|
||||
[header blocks info]
|
||||
[body blocks info]
|
||||
~~~
|
||||
|
||||
元数据要点:
|
||||
|
||||
* 单个文件可以放入512个文件
|
||||
|
||||
block info:
|
||||
|
||||
~~~
|
||||
[fromOffset1, toOffset1 ], [fromOffset2, toOffset2 ], ...
|
||||
~~~
|
||||
|
||||
15
EdgeNode/internal/utils/bfs/block_info.go
Normal file
15
EdgeNode/internal/utils/bfs/block_info.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
// BlockInfo describes one stored block: the byte range it covers within the
// origin (logical) content and the byte range it occupies inside the .b file.
type BlockInfo struct {
	OriginOffsetFrom int64 `json:"1,omitempty"`
	OriginOffsetTo   int64 `json:"2,omitempty"`

	BFileOffsetFrom int64 `json:"3,omitempty"`
	BFileOffsetTo   int64 `json:"4,omitempty"`
}

// Contains reports whether offset falls inside this block's origin range.
// The range is half-open: [OriginOffsetFrom, OriginOffsetTo).
func (this BlockInfo) Contains(offset int64) bool {
	if offset < this.OriginOffsetFrom {
		return false
	}
	return offset < this.OriginOffsetTo // upper bound is exclusive
}
|
||||
403
EdgeNode/internal/utils/bfs/blocks_file.go
Normal file
403
EdgeNode/internal/utils/bfs/blocks_file.go
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BFileExt is the filename extension of block (data) files.
const BFileExt = ".b"

// BlockType identifies which section of a stored file a block belongs to.
type BlockType string

const (
	BlockTypeHeader BlockType = "header" // block stores header bytes
	BlockTypeBody   BlockType = "body"   // block stores body bytes
)
|
||||
|
||||
// BlocksFile manages a pair of files: a .b data file holding raw blocks and
// its companion .m meta file describing them.
type BlocksFile struct {
	opt   *BlockFileOptions // tuning options (sync threshold etc.)
	fp    *os.File          // underlying .b data file, opened for writing
	mFile *MetaFile         // companion .m meta file (shares mu)

	isClosing bool // set once CanClose has approved closing
	isClosed  bool // set by Close/RemoveAll

	mu *sync.RWMutex // guards the fields above; also passed to OpenMetaFile

	writtenBytes   int64                // bytes written since the last sync
	writingFileMap map[string]zero.Zero // hash => Zero
	syncAt         time.Time            // time of the most recent sync

	readerPool chan *FileReader // recycled readers (capacity 32)
	countRefs  int32            // open readers referencing this file (atomic)
}
|
||||
|
||||
func NewBlocksFileWithRawFile(fp *os.File, options *BlockFileOptions) (*BlocksFile, error) {
|
||||
options.EnsureDefaults()
|
||||
|
||||
var bFilename = fp.Name()
|
||||
if !strings.HasSuffix(bFilename, BFileExt) {
|
||||
return nil, errors.New("filename '" + bFilename + "' must has a '" + BFileExt + "' extension")
|
||||
}
|
||||
|
||||
var mu = &sync.RWMutex{}
|
||||
|
||||
var mFilename = strings.TrimSuffix(bFilename, BFileExt) + MFileExt
|
||||
mFile, err := OpenMetaFile(mFilename, mu)
|
||||
if err != nil {
|
||||
_ = fp.Close()
|
||||
return nil, fmt.Errorf("load '%s' failed: %w", mFilename, err)
|
||||
}
|
||||
|
||||
AckReadThread()
|
||||
_, err = fp.Seek(0, io.SeekEnd)
|
||||
ReleaseReadThread()
|
||||
if err != nil {
|
||||
_ = fp.Close()
|
||||
_ = mFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BlocksFile{
|
||||
fp: fp,
|
||||
mFile: mFile,
|
||||
mu: mu,
|
||||
opt: options,
|
||||
syncAt: time.Now(),
|
||||
readerPool: make(chan *FileReader, 32),
|
||||
writingFileMap: map[string]zero.Zero{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func OpenBlocksFile(filename string, options *BlockFileOptions) (*BlocksFile, error) {
|
||||
// TODO 考虑是否使用flock锁定,防止多进程写冲突
|
||||
fp, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
var dir = filepath.Dir(filename)
|
||||
_ = os.MkdirAll(dir, 0777)
|
||||
|
||||
// try again
|
||||
fp, err = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0666)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open blocks file failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return NewBlocksFileWithRawFile(fp, options)
|
||||
}
|
||||
|
||||
func (this *BlocksFile) Filename() string {
|
||||
return this.fp.Name()
|
||||
}
|
||||
|
||||
// Write appends b to the data file as one block belonging to the file
// identified by hash, then records the block in the meta file.
// For body blocks originOffset is the block's start offset within the
// origin content; it is ignored for header blocks.
// Returns the number of bytes written.
func (this *BlocksFile) Write(hash string, blockType BlockType, b []byte, originOffset int64) (n int, err error) {
	if len(b) == 0 {
		return
	}

	this.mu.Lock()
	defer this.mu.Unlock()

	// remember the write position BEFORE writing: the meta record below
	// covers [posBefore, posBefore+n)
	posBefore, err := this.currentPos()
	if err != nil {
		return 0, err
	}

	err = this.checkStatus()
	if err != nil {
		return
	}

	AckWriteThread()
	n, err = this.fp.Write(b)
	ReleaseWriteThread()

	if err == nil {
		if n > 0 {
			this.writtenBytes += int64(n)
		}

		// record the block in the meta file; "Unsafe" variants are used
		// because mu is already held
		if blockType == BlockTypeHeader {
			err = this.mFile.WriteHeaderBlockUnsafe(hash, posBefore, posBefore+int64(n))
		} else if blockType == BlockTypeBody {
			err = this.mFile.WriteBodyBlockUnsafe(hash, posBefore, posBefore+int64(n), originOffset, originOffset+int64(n))
		} else {
			err = errors.New("invalid block type '" + string(blockType) + "'")
		}
	}

	return
}
|
||||
|
||||
func (this *BlocksFile) OpenFileWriter(fileHash string, bodySize int64, isPartial bool) (writer *FileWriter, err error) {
|
||||
err = CheckHashErr(fileHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
_, isWriting := this.writingFileMap[fileHash]
|
||||
if isWriting {
|
||||
err = ErrFileIsWriting
|
||||
return
|
||||
}
|
||||
this.writingFileMap[fileHash] = zero.Zero{}
|
||||
|
||||
err = this.checkStatus()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return NewFileWriter(this, fileHash, bodySize, isPartial)
|
||||
}
|
||||
|
||||
// OpenFileReader opens a reader for the file identified by fileHash.
// It reuses a pooled reader when one is available, otherwise opens a fresh
// OS file handle on the .b file. Callers must release the reader via
// CloseFileReader so the reference count stays balanced.
func (this *BlocksFile) OpenFileReader(fileHash string, isPartial bool) (*FileReader, error) {
	err := CheckHashErr(fileHash)
	if err != nil {
		return nil, err
	}

	// NOTE(review): the status check is released before the reader is built,
	// so a concurrent Close may race with the steps below — confirm callers
	// tolerate ErrClosed/fresh-open races here.
	this.mu.RLock()
	err = this.checkStatus()
	this.mu.RUnlock()
	if err != nil {
		return nil, err
	}

	// does the file exist?
	header, ok := this.mFile.CloneFileHeader(fileHash)
	if !ok {
		return nil, os.ErrNotExist
	}

	// TODO for partial content, ranges should be passed in to check for intersections

	if header.IsWriting {
		return nil, ErrFileIsWriting
	}

	// a non-partial read requires the file to be fully written
	if !isPartial && !header.IsCompleted {
		return nil, os.ErrNotExist
	}

	// try to reuse a pooled reader first (non-blocking)
	select {
	case reader := <-this.readerPool:
		if reader == nil {
			return nil, ErrClosed
		}
		reader.Reset(header)
		atomic.AddInt32(&this.countRefs, 1)
		return reader, nil
	default:
	}

	// no pooled reader: open a dedicated read handle on the data file
	AckReadThread()
	fp, err := os.Open(this.fp.Name())
	ReleaseReadThread()
	if err != nil {
		return nil, err
	}

	atomic.AddInt32(&this.countRefs, 1)
	return NewFileReader(this, fp, header), nil
}
|
||||
|
||||
func (this *BlocksFile) CloseFileReader(reader *FileReader) error {
|
||||
defer atomic.AddInt32(&this.countRefs, -1)
|
||||
|
||||
select {
|
||||
case this.readerPool <- reader:
|
||||
return nil
|
||||
default:
|
||||
return reader.Free()
|
||||
}
|
||||
}
|
||||
|
||||
func (this *BlocksFile) ExistFile(fileHash string) bool {
|
||||
err := CheckHashErr(fileHash)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return this.mFile.ExistFile(fileHash)
|
||||
}
|
||||
|
||||
func (this *BlocksFile) RemoveFile(fileHash string) error {
|
||||
err := CheckHashErr(fileHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return this.mFile.RemoveFile(fileHash)
|
||||
}
|
||||
|
||||
func (this *BlocksFile) Sync() error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
err := this.checkStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return this.sync(false)
|
||||
}
|
||||
|
||||
func (this *BlocksFile) ForceSync() error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
err := this.checkStatus()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return this.sync(true)
|
||||
}
|
||||
|
||||
// SyncAt returns the time of the most recent sync.
// NOTE(review): syncAt is read here without holding mu while sync() writes it
// under the lock — assumes callers tolerate a slightly stale value; confirm.
func (this *BlocksFile) SyncAt() time.Time {
	return this.syncAt
}
|
||||
|
||||
// Compact reclaims space left by removed files in the .b and .m files.
// TODO: not yet implemented; currently a no-op.
func (this *BlocksFile) Compact() error {
	// TODO: to be implemented
	return nil
}
|
||||
|
||||
func (this *BlocksFile) RemoveAll() error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
this.isClosed = true
|
||||
|
||||
_ = this.mFile.RemoveAll()
|
||||
|
||||
this.closeReaderPool()
|
||||
|
||||
_ = this.fp.Close()
|
||||
return os.Remove(this.fp.Name())
|
||||
}
|
||||
|
||||
// CanClose 检查是否可以关闭
|
||||
func (this *BlocksFile) CanClose() bool {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
if len(this.writingFileMap) > 0 || atomic.LoadInt32(&this.countRefs) > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
this.isClosing = true
|
||||
return true
|
||||
}
|
||||
|
||||
// Close closes the current file. It is idempotent: a second call returns nil
// immediately. The meta file is closed first, pooled readers are freed, and
// finally the data file handle is closed.
func (this *BlocksFile) Close() error {
	this.mu.Lock()
	defer this.mu.Unlock()

	if this.isClosed {
		return nil
	}

	// TODO: decide whether to sync before closing
	//_ = this.sync(true)

	this.isClosed = true

	_ = this.mFile.Close()

	this.closeReaderPool()

	return this.fp.Close()
}
|
||||
|
||||
// IsClosing 判断当前文件是否正在关闭或者已关闭
|
||||
func (this *BlocksFile) IsClosing() bool {
|
||||
return this.isClosed || this.isClosing
|
||||
}
|
||||
|
||||
// IncrRef atomically increments the reference count of active readers.
func (this *BlocksFile) IncrRef() {
	atomic.AddInt32(&this.countRefs, 1)
}
|
||||
|
||||
// DecrRef atomically decrements the reference count of active readers.
func (this *BlocksFile) DecrRef() {
	atomic.AddInt32(&this.countRefs, -1)
}
|
||||
|
||||
// TestReaderPool exposes the internal reader pool; intended for tests only.
func (this *BlocksFile) TestReaderPool() chan *FileReader {
	return this.readerPool
}
|
||||
|
||||
func (this *BlocksFile) removeWritingFile(hash string) {
|
||||
this.mu.Lock()
|
||||
delete(this.writingFileMap, hash)
|
||||
this.mu.Unlock()
|
||||
}
|
||||
|
||||
func (this *BlocksFile) checkStatus() error {
|
||||
if this.isClosed || this.isClosing {
|
||||
return fmt.Errorf("check status failed: %w", ErrClosed)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// currentPos reports the data file's current seek position, i.e. the offset
// at which the next Write will land.
func (this *BlocksFile) currentPos() (int64, error) {
	return this.fp.Seek(0, io.SeekCurrent)
}
|
||||
|
||||
func (this *BlocksFile) sync(force bool) error {
|
||||
if !force {
|
||||
if this.writtenBytes < this.opt.BytesPerSync {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if this.writtenBytes > 0 {
|
||||
AckWriteThread()
|
||||
err := this.fp.Sync()
|
||||
ReleaseWriteThread()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
this.writtenBytes = 0
|
||||
|
||||
this.syncAt = time.Now()
|
||||
|
||||
if force {
|
||||
return this.mFile.SyncUnsafe()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *BlocksFile) closeReaderPool() {
|
||||
for {
|
||||
select {
|
||||
case reader := <-this.readerPool:
|
||||
if reader != nil {
|
||||
_ = reader.Free()
|
||||
}
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
17
EdgeNode/internal/utils/bfs/blocks_file_options.go
Normal file
17
EdgeNode/internal/utils/bfs/blocks_file_options.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
// BlockFileOptions carries tuning options for a BlocksFile.
type BlockFileOptions struct {
	BytesPerSync int64 // flush to disk once this many bytes were written
}

// EnsureDefaults fills in defaults for unset (non-positive) options.
func (this *BlockFileOptions) EnsureDefaults() {
	if this.BytesPerSync > 0 {
		return
	}
	this.BytesPerSync = 1 << 20 // 1 MiB
}

// DefaultBlockFileOptions is a ready-to-use default configuration.
var DefaultBlockFileOptions = &BlockFileOptions{
	BytesPerSync: 1 << 20,
}
|
||||
86
EdgeNode/internal/utils/bfs/blocks_file_test.go
Normal file
86
EdgeNode/internal/utils/bfs/blocks_file_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBlocksFile_CanClose(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
bFile, openErr := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
|
||||
if openErr != nil {
|
||||
if os.IsNotExist(openErr) {
|
||||
return
|
||||
}
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
|
||||
reader, err := bFile.OpenFileReader(bfs.Hash("123456"), false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a.IsTrue(!bFile.CanClose())
|
||||
|
||||
err = reader.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// duplicated close
|
||||
err = reader.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a.IsTrue(bFile.CanClose())
|
||||
}
|
||||
|
||||
func TestBlocksFile_OpenFileWriter_SameHash(t *testing.T) {
|
||||
bFile, openErr := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
|
||||
if openErr != nil {
|
||||
if os.IsNotExist(openErr) {
|
||||
return
|
||||
}
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
|
||||
{
|
||||
writer, err := bFile.OpenFileWriter(bfs.Hash("123456"), -1, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_ = writer.Close()
|
||||
}
|
||||
|
||||
{
|
||||
writer, err := bFile.OpenFileWriter(bfs.Hash("123456"), -1, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_ = writer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlocksFile_RemoveAll(t *testing.T) {
|
||||
bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
_ = bFile.Close()
|
||||
}()
|
||||
|
||||
err = bFile.RemoveAll()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
20
EdgeNode/internal/utils/bfs/errors.go
Normal file
20
EdgeNode/internal/utils/bfs/errors.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
var ErrClosed = errors.New("the file closed")
|
||||
var ErrInvalidHash = errors.New("invalid hash")
|
||||
var ErrFileIsWriting = errors.New("the file is writing")
|
||||
|
||||
func IsWritingErr(err error) bool {
|
||||
return err != nil && errors.Is(err, ErrFileIsWriting)
|
||||
}
|
||||
|
||||
func IsNotExist(err error) bool {
|
||||
return err != nil && os.IsNotExist(err)
|
||||
}
|
||||
203
EdgeNode/internal/utils/bfs/file_header.go
Normal file
203
EdgeNode/internal/utils/bfs/file_header.go
Normal file
@@ -0,0 +1,203 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// FileHeader is the metadata record kept for a single stored file.
// The JSON tags use short numeric keys to keep the encoded form compact.
type FileHeader struct {
	Version         int         `json:"1,omitempty"`  // metadata format version
	ModifiedAt      int64       `json:"2,omitempty"`  // last-modified unix timestamp
	ExpiresAt       int64       `json:"3,omitempty"`  // expiration unix timestamp
	Status          int         `json:"4,omitempty"`  // status code (tests use 200 — presumably HTTP; confirm)
	HeaderSize      int64       `json:"5,omitempty"`  // total header section size in bytes
	BodySize        int64       `json:"6,omitempty"`  // total body section size in bytes
	ExpiredBodySize int64       `json:"7,omitempty"`
	HeaderBlocks    []BlockInfo `json:"8,omitempty"`  // stored blocks making up the header
	BodyBlocks      []BlockInfo `json:"9,omitempty"`  // stored blocks making up the body
	IsCompleted     bool        `json:"10,omitempty"` // true when BodyBlocks cover [0, BodySize) with no gaps (set by Compact)
	IsWriting       bool        `json:"11,omitempty"` // true while a writer is active on this file
}
|
||||
|
||||
// BlockAt returns the body block containing offset, if any.
//
// NOTE(review): the sort.Search predicate has a side effect — it records a
// probed block whenever that block contains offset. This relies on BodyBlocks
// being sorted by origin offset (see compactBody) so the predicate is
// monotone and the binary search evaluates the first matching index; keep the
// predicate and the ordering in sync.
func (this *FileHeader) BlockAt(offset int64) (blockInfo BlockInfo, ok bool) {
	var l = len(this.BodyBlocks)
	if l == 1 {
		// fast path: a single block, no search needed
		if this.BodyBlocks[0].Contains(offset) {
			return this.BodyBlocks[0], true
		}
		return
	}

	sort.Search(l, func(i int) bool {
		if this.BodyBlocks[i].Contains(offset) {
			// remember the match as a side effect of probing
			blockInfo = this.BodyBlocks[i]
			ok = true
			return true
		}
		return this.BodyBlocks[i].OriginOffsetFrom > offset
	})

	return
}
|
||||
|
||||
func (this *FileHeader) MaxOffset() int64 {
|
||||
var l = len(this.BodyBlocks)
|
||||
if l > 0 {
|
||||
return this.BodyBlocks[l-1].OriginOffsetTo
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Compact merges adjacent header blocks, then sorts/merges the body blocks
// and refreshes the IsCompleted flag.
func (this *FileHeader) Compact() {
	this.compactHeader()
	this.compactBody()
}
|
||||
|
||||
// compact header blocks
|
||||
func (this *FileHeader) compactHeader() {
|
||||
var l = len(this.HeaderBlocks)
|
||||
if l > 1 {
|
||||
// 合并
|
||||
var newBlocks []BlockInfo
|
||||
var newIndex int
|
||||
for index, currentBlock := range this.HeaderBlocks {
|
||||
if index == 0 {
|
||||
newBlocks = append(newBlocks, currentBlock)
|
||||
newIndex++
|
||||
continue
|
||||
}
|
||||
|
||||
var lastBlock = newBlocks[newIndex-1]
|
||||
if currentBlock.OriginOffsetFrom >= lastBlock.OriginOffsetFrom &&
|
||||
currentBlock.OriginOffsetFrom <= /* MUST gte */ lastBlock.OriginOffsetTo &&
|
||||
currentBlock.OriginOffsetFrom-lastBlock.OriginOffsetFrom == currentBlock.BFileOffsetFrom-lastBlock.BFileOffsetFrom /* 两侧距离一致 */ {
|
||||
if currentBlock.OriginOffsetTo > lastBlock.OriginOffsetTo {
|
||||
lastBlock.OriginOffsetTo = currentBlock.OriginOffsetTo
|
||||
lastBlock.BFileOffsetTo = currentBlock.BFileOffsetTo
|
||||
newBlocks[newIndex-1] = lastBlock
|
||||
}
|
||||
} else {
|
||||
newBlocks = append(newBlocks, currentBlock)
|
||||
newIndex++
|
||||
}
|
||||
}
|
||||
this.HeaderBlocks = newBlocks
|
||||
}
|
||||
}
|
||||
|
||||
// sort and compact body blocks
|
||||
func (this *FileHeader) compactBody() {
|
||||
var l = len(this.BodyBlocks)
|
||||
|
||||
if l > 0 {
|
||||
if l > 1 {
|
||||
// 排序
|
||||
sort.Slice(this.BodyBlocks, func(i, j int) bool {
|
||||
var block1 = this.BodyBlocks[i]
|
||||
var block2 = this.BodyBlocks[j]
|
||||
if block1.OriginOffsetFrom == block1.OriginOffsetFrom {
|
||||
return block1.OriginOffsetTo < block2.OriginOffsetTo
|
||||
}
|
||||
return block1.OriginOffsetFrom < block2.OriginOffsetFrom
|
||||
})
|
||||
|
||||
// 合并
|
||||
var newBlocks []BlockInfo
|
||||
var newIndex int
|
||||
for index, currentBlock := range this.BodyBlocks {
|
||||
if index == 0 {
|
||||
newBlocks = append(newBlocks, currentBlock)
|
||||
newIndex++
|
||||
continue
|
||||
}
|
||||
|
||||
var lastBlock = newBlocks[newIndex-1]
|
||||
if currentBlock.OriginOffsetFrom >= lastBlock.OriginOffsetFrom &&
|
||||
currentBlock.OriginOffsetFrom <= /* MUST gte */ lastBlock.OriginOffsetTo &&
|
||||
currentBlock.OriginOffsetFrom-lastBlock.OriginOffsetFrom == currentBlock.BFileOffsetFrom-lastBlock.BFileOffsetFrom /* 两侧距离一致 */ {
|
||||
if currentBlock.OriginOffsetTo > lastBlock.OriginOffsetTo {
|
||||
lastBlock.OriginOffsetTo = currentBlock.OriginOffsetTo
|
||||
lastBlock.BFileOffsetTo = currentBlock.BFileOffsetTo
|
||||
newBlocks[newIndex-1] = lastBlock
|
||||
}
|
||||
} else {
|
||||
newBlocks = append(newBlocks, currentBlock)
|
||||
newIndex++
|
||||
}
|
||||
}
|
||||
this.BodyBlocks = newBlocks
|
||||
l = len(this.BodyBlocks)
|
||||
}
|
||||
|
||||
// 检查是否已完成
|
||||
var isCompleted = true
|
||||
if this.BodyBlocks[0].OriginOffsetFrom != 0 || this.BodyBlocks[len(this.BodyBlocks)-1].OriginOffsetTo != this.BodySize {
|
||||
isCompleted = false
|
||||
} else {
|
||||
for index, block := range this.BodyBlocks {
|
||||
// 是否有不连续的
|
||||
if index > 0 && block.OriginOffsetFrom > this.BodyBlocks[index-1].OriginOffsetTo {
|
||||
isCompleted = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
this.IsCompleted = isCompleted
|
||||
}
|
||||
}
|
||||
|
||||
// Clone current header
|
||||
func (this *FileHeader) Clone() *FileHeader {
|
||||
return &FileHeader{
|
||||
Version: this.Version,
|
||||
ModifiedAt: this.ModifiedAt,
|
||||
ExpiresAt: this.ExpiresAt,
|
||||
Status: this.Status,
|
||||
HeaderSize: this.HeaderSize,
|
||||
BodySize: this.BodySize,
|
||||
ExpiredBodySize: this.ExpiredBodySize,
|
||||
HeaderBlocks: this.HeaderBlocks,
|
||||
BodyBlocks: this.BodyBlocks,
|
||||
IsCompleted: this.IsCompleted,
|
||||
IsWriting: this.IsWriting,
|
||||
}
|
||||
}
|
||||
|
||||
// Encode serializes this header into a meta block for the given file hash.
// Headers whose JSON form is under 100 bytes are stored uncompressed with a
// "json:" prefix; larger ones are compressed.
func (this *FileHeader) Encode(hash string) ([]byte, error) {
	headerJSON, err := json.Marshal(this)
	if err != nil {
		return nil, err
	}

	// we do not compress data which size is less than 100 bytes
	if len(headerJSON) < 100 {
		return EncodeMetaBlock(MetaActionNew, hash, append([]byte("json:"), headerJSON...))
	}

	var buf = utils.SharedBufferPool.Get()
	defer utils.SharedBufferPool.Put(buf)

	compressor, err := SharedCompressPool.Get(buf)
	if err != nil {
		return nil, err
	}

	_, err = compressor.Write(headerJSON)
	if err != nil {
		// release the compressor on the error path too
		_ = compressor.Close()
		SharedCompressPool.Put(compressor)
		return nil, err
	}

	// Close flushes the remaining compressed bytes into buf before it is read
	err = compressor.Close()
	SharedCompressPool.Put(compressor)
	if err != nil {
		return nil, err
	}

	// NOTE(review): buf is returned to the pool when this function returns;
	// assumes EncodeMetaBlock copies buf.Bytes() rather than retaining the
	// slice — confirm, otherwise the pooled buffer may be reused while the
	// encoded block is still referenced.
	return EncodeMetaBlock(MetaActionNew, hash, buf.Bytes())
}
|
||||
67
EdgeNode/internal/utils/bfs/file_header_lazy.go
Normal file
67
EdgeNode/internal/utils/bfs/file_header_lazy.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// LazyFileHeader load file header lazily to save memory: the encoded bytes
// are kept as-is and decoded on first use.
type LazyFileHeader struct {
	rawData    []byte      // encoded header bytes as read from the meta file
	fileHeader *FileHeader // decoded header; populated lazily
}
|
||||
|
||||
func NewLazyFileHeaderFromData(rawData []byte) *LazyFileHeader {
|
||||
return &LazyFileHeader{
|
||||
rawData: rawData,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLazyFileHeader(fileHeader *FileHeader) *LazyFileHeader {
|
||||
return &LazyFileHeader{
|
||||
fileHeader: fileHeader,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *LazyFileHeader) FileHeaderUnsafe() (*FileHeader, error) {
|
||||
if this.fileHeader != nil {
|
||||
return this.fileHeader, nil
|
||||
}
|
||||
|
||||
var jsonPrefix = []byte("json:")
|
||||
|
||||
var header = &FileHeader{}
|
||||
|
||||
// json
|
||||
if bytes.HasPrefix(this.rawData, jsonPrefix) {
|
||||
err := json.Unmarshal(this.rawData[len(jsonPrefix):], header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return header, nil
|
||||
}
|
||||
|
||||
decompressor, err := SharedDecompressPool.Get(bytes.NewBuffer(this.rawData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = decompressor.Close()
|
||||
SharedDecompressPool.Put(decompressor)
|
||||
}()
|
||||
|
||||
err = json.NewDecoder(decompressor).Decode(header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header.IsWriting = false
|
||||
|
||||
this.fileHeader = header
|
||||
this.rawData = nil
|
||||
|
||||
return header, nil
|
||||
}
|
||||
87
EdgeNode/internal/utils/bfs/file_header_lazy_test.go
Normal file
87
EdgeNode/internal/utils/bfs/file_header_lazy_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewLazyFileHeaderFromData(t *testing.T) {
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
BFileOffsetFrom: 0,
|
||||
BFileOffsetTo: 1 << 20,
|
||||
},
|
||||
},
|
||||
}
|
||||
blockBytes, err := header.Encode(bfs.Hash("123456"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, _, rawData, err := bfs.DecodeMetaBlock(blockBytes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var lazyHeader = bfs.NewLazyFileHeaderFromData(rawData)
|
||||
newHeader, err := lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(newHeader)
|
||||
}
|
||||
|
||||
func BenchmarkLazyFileHeader_Decode(b *testing.B) {
|
||||
runtime.GOMAXPROCS(12)
|
||||
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{},
|
||||
}
|
||||
var offset int64
|
||||
for {
|
||||
var end = offset + 16<<10
|
||||
if end > 1<<20 {
|
||||
break
|
||||
}
|
||||
|
||||
header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
|
||||
BFileOffsetFrom: offset,
|
||||
BFileOffsetTo: end,
|
||||
})
|
||||
|
||||
offset = end
|
||||
}
|
||||
|
||||
var hash = bfs.Hash("123456")
|
||||
|
||||
blockBytes, err := header.Encode(hash)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
_, _, rawData, decodeErr := bfs.DecodeMetaBlock(blockBytes)
|
||||
if decodeErr != nil {
|
||||
b.Fatal(decodeErr)
|
||||
}
|
||||
|
||||
var lazyHeader = bfs.NewLazyFileHeaderFromData(rawData)
|
||||
_, decodeErr = lazyHeader.FileHeaderUnsafe()
|
||||
if decodeErr != nil {
|
||||
b.Fatal(decodeErr)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
432
EdgeNode/internal/utils/bfs/file_header_test.go
Normal file
432
EdgeNode/internal/utils/bfs/file_header_test.go
Normal file
@@ -0,0 +1,432 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFileHeader_Compact(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 100,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 0,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsTrue(header.IsCompleted)
|
||||
}
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 100,
|
||||
OriginOffsetTo: 200,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 0,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsTrue(header.IsCompleted)
|
||||
}
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 10,
|
||||
OriginOffsetTo: 99,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 110,
|
||||
OriginOffsetTo: 200,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 88,
|
||||
OriginOffsetTo: 120,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 0,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsTrue(header.IsCompleted)
|
||||
}
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 100,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 10,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 100,
|
||||
OriginOffsetTo: 200,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsFalse(header.IsCompleted)
|
||||
}
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 0,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 100,
|
||||
OriginOffsetTo: 199,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsFalse(header.IsCompleted)
|
||||
}
|
||||
|
||||
{
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodySize: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
OriginOffsetFrom: 0,
|
||||
OriginOffsetTo: 100,
|
||||
},
|
||||
{
|
||||
OriginOffsetFrom: 101,
|
||||
OriginOffsetTo: 200,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
a.IsFalse(header.IsCompleted)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileHeader_Compact_Merge(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
HeaderBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
BFileOffsetFrom: 1000,
|
||||
BFileOffsetTo: 1100,
|
||||
OriginOffsetFrom: 1200,
|
||||
OriginOffsetTo: 1300,
|
||||
},
|
||||
{
|
||||
BFileOffsetFrom: 1100,
|
||||
BFileOffsetTo: 1200,
|
||||
OriginOffsetFrom: 1300,
|
||||
OriginOffsetTo: 1400,
|
||||
},
|
||||
},
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
BFileOffsetFrom: 0,
|
||||
BFileOffsetTo: 100,
|
||||
OriginOffsetFrom: 200,
|
||||
OriginOffsetTo: 300,
|
||||
},
|
||||
{
|
||||
BFileOffsetFrom: 100,
|
||||
BFileOffsetTo: 200,
|
||||
OriginOffsetFrom: 300,
|
||||
OriginOffsetTo: 400,
|
||||
},
|
||||
{
|
||||
BFileOffsetFrom: 200,
|
||||
BFileOffsetTo: 300,
|
||||
OriginOffsetFrom: 400,
|
||||
OriginOffsetTo: 500,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
logs.PrintAsJSON(header.HeaderBlocks)
|
||||
logs.PrintAsJSON(header.BodyBlocks)
|
||||
|
||||
a.IsTrue(len(header.HeaderBlocks) == 1)
|
||||
a.IsTrue(len(header.BodyBlocks) == 1)
|
||||
}
|
||||
|
||||
func TestFileHeader_Compact_Merge2(t *testing.T) {
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
BFileOffsetFrom: 0,
|
||||
BFileOffsetTo: 100,
|
||||
OriginOffsetFrom: 200,
|
||||
OriginOffsetTo: 300,
|
||||
},
|
||||
{
|
||||
BFileOffsetFrom: 101,
|
||||
BFileOffsetTo: 200,
|
||||
OriginOffsetFrom: 301,
|
||||
OriginOffsetTo: 400,
|
||||
},
|
||||
{
|
||||
BFileOffsetFrom: 200,
|
||||
BFileOffsetTo: 300,
|
||||
OriginOffsetFrom: 400,
|
||||
OriginOffsetTo: 500,
|
||||
},
|
||||
},
|
||||
}
|
||||
header.Compact()
|
||||
logs.PrintAsJSON(header.BodyBlocks)
|
||||
}
|
||||
|
||||
func TestFileHeader_Clone(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
var header = &bfs.FileHeader{
|
||||
Version: 1,
|
||||
Status: 200,
|
||||
BodyBlocks: []bfs.BlockInfo{
|
||||
{
|
||||
BFileOffsetFrom: 0,
|
||||
BFileOffsetTo: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var clonedHeader = header.Clone()
|
||||
t.Log("=== cloned header ===")
|
||||
logs.PrintAsJSON(clonedHeader, t)
|
||||
a.IsTrue(len(clonedHeader.BodyBlocks) == 1)
|
||||
|
||||
header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
|
||||
BFileOffsetFrom: 100,
|
||||
BFileOffsetTo: 200,
|
||||
})
|
||||
header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
|
||||
BFileOffsetFrom: 300,
|
||||
BFileOffsetTo: 400,
|
||||
})
|
||||
|
||||
clonedHeader.BodyBlocks[0].OriginOffsetFrom = 100000000
|
||||
|
||||
t.Log("=== after changed ===")
|
||||
logs.PrintAsJSON(clonedHeader, t)
|
||||
a.IsTrue(len(clonedHeader.BodyBlocks) == 1)
|
||||
|
||||
t.Log("=== original header ===")
|
||||
logs.PrintAsJSON(header, t)
|
||||
a.IsTrue(header.BodyBlocks[0].OriginOffsetFrom != clonedHeader.BodyBlocks[0].OriginOffsetFrom)
|
||||
}
|
||||
|
||||
// TestFileHeader_Encode encodes headers of several sizes and logs the
// binary-encoded size versus the plain-JSON size for comparison.
func TestFileHeader_Encode(t *testing.T) {
	{
		// fully populated header with a single body block;
		// the result must round-trip through DecodeMetaBlock
		var header = &bfs.FileHeader{
			Version:    1,
			Status:     200,
			ModifiedAt: fasttime.Now().Unix(),
			ExpiresAt:  fasttime.Now().Unix() + 3600,
			BodySize:   1 << 20,
			HeaderSize: 1 << 10,
			BodyBlocks: []bfs.BlockInfo{
				{
					BFileOffsetFrom: 1 << 10,
					BFileOffsetTo:   1 << 20,
				},
			},
		}
		data, err := header.Encode(bfs.Hash("123456"))
		if err != nil {
			t.Fatal(err)
		}
		jsonBytes, _ := json.Marshal(header)
		t.Log(len(header.BodyBlocks), "blocks", len(data), "bytes", "json:", len(jsonBytes), "bytes")

		_, _, _, err = bfs.DecodeMetaBlock(data)
		if err != nil {
			t.Fatal(err)
		}
	}

	{
		// 256 KiB of body split into 16 KiB blocks
		var header = &bfs.FileHeader{
			Version:    1,
			Status:     200,
			BodyBlocks: []bfs.BlockInfo{},
		}
		var offset int64
		for {
			var end = offset + 16<<10
			if end > 256<<10 {
				break
			}

			header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
				BFileOffsetFrom: offset,
				BFileOffsetTo:   end,
			})

			offset = end
		}
		data, err := header.Encode(bfs.Hash("123456"))
		if err != nil {
			t.Fatal(err)
		}
		jsonBytes, _ := json.Marshal(header)
		t.Log(len(header.BodyBlocks), "blocks", len(data), "bytes", "json:", len(jsonBytes), "bytes")
	}

	{
		// 512 KiB of body split into 16 KiB blocks
		var header = &bfs.FileHeader{
			Version:    1,
			Status:     200,
			BodyBlocks: []bfs.BlockInfo{},
		}
		var offset int64
		for {
			var end = offset + 16<<10
			if end > 512<<10 {
				break
			}

			header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
				BFileOffsetFrom: offset,
				BFileOffsetTo:   end,
			})

			offset = end
		}
		data, err := header.Encode(bfs.Hash("123456"))
		if err != nil {
			t.Fatal(err)
		}
		jsonBytes, _ := json.Marshal(header)
		t.Log(len(header.BodyBlocks), "blocks", len(data), "bytes", "json:", len(jsonBytes), "bytes")
	}

	{
		// 1 MiB of body split into 16 KiB blocks
		var header = &bfs.FileHeader{
			Version:    1,
			Status:     200,
			BodyBlocks: []bfs.BlockInfo{},
		}
		var offset int64
		for {
			var end = offset + 16<<10
			if end > 1<<20 {
				break
			}

			header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
				BFileOffsetFrom: offset,
				BFileOffsetTo:   end,
			})

			offset = end
		}
		data, err := header.Encode(bfs.Hash("123456"))
		if err != nil {
			t.Fatal(err)
		}
		jsonBytes, _ := json.Marshal(header)
		t.Log(len(header.BodyBlocks), "blocks", len(data), "bytes", "json:", len(jsonBytes), "bytes")
	}
}
|
||||
|
||||
// BenchmarkFileHeader_Compact measures Compact() on a header holding
// 100 body blocks whose BFile offsets are all zero.
func BenchmarkFileHeader_Compact(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var header = &bfs.FileHeader{
			Version:    1,
			Status:     200,
			BodySize:   200,
			BodyBlocks: nil,
		}

		for j := 0; j < 100; j++ {
			header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
				OriginOffsetFrom: int64(j * 100),
				OriginOffsetTo:   int64(j * 200),
				BFileOffsetFrom:  0,
				BFileOffsetTo:    0,
			})
		}

		header.Compact()
	}
}
|
||||
|
||||
// BenchmarkFileHeader_Encode measures parallel Encode() of headers with
// ~128 randomized body blocks (2 MiB body in 16 KiB steps).
func BenchmarkFileHeader_Encode(b *testing.B) {
	runtime.GOMAXPROCS(12)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var header = &bfs.FileHeader{
				Version:    1,
				Status:     200,
				ModifiedAt: rand.Int63(),
				BodySize:   rand.Int63(),
				BodyBlocks: []bfs.BlockInfo{},
			}
			var offset int64
			for {
				var end = offset + 16<<10
				if end > 2<<20 {
					break
				}

				// randomized offsets defeat any delta-encoding fast path
				header.BodyBlocks = append(header.BodyBlocks, bfs.BlockInfo{
					BFileOffsetFrom: offset + int64(rand.Int()%1000000),
					BFileOffsetTo:   end + int64(rand.Int()%1000000),
				})

				offset = end
			}

			var hash = bfs.Hash("123456")

			_, err := header.Encode(hash)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
|
||||
88
EdgeNode/internal/utils/bfs/file_reader.go
Normal file
88
EdgeNode/internal/utils/bfs/file_reader.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileReader reads one logical file out of a BlocksFile, translating
// origin (logical) offsets to physical offsets via the file header's
// block table.
type FileReader struct {
	bFile *BlocksFile // owning blocks file; releases this reader on Close
	fp    *os.File    // underlying OS file handle

	fileHeader *FileHeader // block table and metadata for the file
	pos        int64       // current sequential read position (origin offset)

	isClosed bool // guards against double Close; NOTE(review): not synchronized — assumes single-goroutine use, confirm
}
|
||||
|
||||
func NewFileReader(bFile *BlocksFile, fp *os.File, fileHeader *FileHeader) *FileReader {
|
||||
return &FileReader{
|
||||
bFile: bFile,
|
||||
fp: fp,
|
||||
fileHeader: fileHeader,
|
||||
}
|
||||
}
|
||||
|
||||
// FileHeader returns the header (block table and metadata) this reader
// was opened with, or last Reset to.
func (this *FileReader) FileHeader() *FileHeader {
	return this.fileHeader
}
|
||||
|
||||
// Read implements sequential reading on top of ReadAt, advancing the
// internal position by the number of bytes actually read.
func (this *FileReader) Read(b []byte) (n int, err error) {
	n, err = this.ReadAt(b, this.pos)
	this.pos += int64(n)

	return
}
|
||||
|
||||
// ReadAt reads up to len(b) bytes at the given origin (logical) offset.
// It resolves the block containing the offset, maps it to a physical
// BFile offset range, and clamps the read so it never crosses the end
// of that block (a caller may therefore get a short read at block
// boundaries and must call again for the remainder).
func (this *FileReader) ReadAt(b []byte, offset int64) (n int, err error) {
	// past the end of the logical file
	if offset >= this.fileHeader.MaxOffset() {
		err = io.EOF
		return
	}

	blockInfo, ok := this.fileHeader.BlockAt(offset)
	if !ok {
		err = errors.New("could not find block at '" + types.String(offset) + "'")
		return
	}

	// translate the logical offset to a physical range within the block
	var delta = offset - blockInfo.OriginOffsetFrom
	var bFrom = blockInfo.BFileOffsetFrom + delta
	var bTo = blockInfo.BFileOffsetTo
	if bFrom > bTo {
		err = errors.New("invalid block information")
		return
	}

	// clamp the buffer so the read stays inside the current block
	// NOTE(review): if bFrom == bTo this returns (0, nil), which would make
	// the sequential Read loop spin — confirm BlockAt never returns an
	// exhausted block for an in-range offset
	var bufLen = len(b)
	if int64(bufLen) > bTo-bFrom {
		bufLen = int(bTo - bFrom)
	}

	// throttle concurrent disk reads via the package-level read-thread gate
	AckReadThread()
	n, err = this.fp.ReadAt(b[:bufLen], bFrom)
	ReleaseReadThread()

	return
}
|
||||
|
||||
// Reset rebinds the reader to a new file header and rewinds the
// sequential read position to the start, allowing the reader (and its
// open handle) to be reused for another file.
func (this *FileReader) Reset(fileHeader *FileHeader) {
	this.fileHeader = fileHeader
	this.pos = 0
}
|
||||
|
||||
// Close returns the reader to the owning BlocksFile (which may pool it
// for reuse — see Free for the actual handle close). Calling Close more
// than once is a no-op.
func (this *FileReader) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true
	return this.bFile.CloseFileReader(this)
}
|
||||
|
||||
// Free closes the underlying OS file handle for real; called by the
// owning BlocksFile when the reader is not returned to the pool.
func (this *FileReader) Free() error {
	return this.fp.Close()
}
|
||||
237
EdgeNode/internal/utils/bfs/file_reader_test.go
Normal file
237
EdgeNode/internal/utils/bfs/file_reader_test.go
Normal file
@@ -0,0 +1,237 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFileReader_Read_SmallBuf(t *testing.T) {
|
||||
bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
reader, err := bFile.OpenFileReader(bfs.Hash("123456"), false)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
t.Log(err)
|
||||
return
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = reader.Close()
|
||||
}()
|
||||
|
||||
var buf = make([]byte, 3)
|
||||
for {
|
||||
n, readErr := reader.Read(buf)
|
||||
if n > 0 {
|
||||
t.Log(string(buf[:n]))
|
||||
}
|
||||
if readErr != nil {
|
||||
if readErr == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatal(readErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFileReader_Read_LargeBuff reads the same fixture with a 128-byte
// buffer (larger than any single block), checking short reads are
// handled. Missing testdata skips the test.
func TestFileReader_Read_LargeBuff(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	reader, err := bFile.OpenFileReader(bfs.Hash("123456"), false)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	defer func() {
		_ = reader.Close()
	}()

	var buf = make([]byte, 128)
	for {
		n, readErr := reader.Read(buf)
		if n > 0 {
			t.Log(string(buf[:n]))
		}
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			t.Fatal(readErr)
		}
	}
}
|
||||
|
||||
// TestFileReader_Read_LargeFile streams a large fixture in 16 KiB
// chunks and reports total size and elapsed time. Missing testdata
// skips the test.
func TestFileReader_Read_LargeFile(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	reader, err := bFile.OpenFileReader(bfs.Hash("123456@LARGE"), false)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	defer func() {
		_ = reader.Close()
	}()

	var buf = make([]byte, 16<<10)
	var totalSize int64
	var before = time.Now()
	for {
		n, readErr := reader.Read(buf)
		if n > 0 {
			totalSize += int64(n)
		}
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			t.Fatal(readErr)
		}
	}
	t.Log("totalSize:", totalSize>>20, "MiB", "cost:", fmt.Sprintf("%.4fms", time.Since(before).Seconds()*1000))
}
|
||||
|
||||
// TestFileReader_ReadAt probes random-access reads: block start, block
// interior, a buffer larger than the remaining block, the last byte
// region, and an offset far past EOF. Missing testdata skips the test.
func TestFileReader_ReadAt(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	reader, err := bFile.OpenFileReader(bfs.Hash("123456"), false)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log(err)
			return
		}
		t.Fatal(err)
	}

	defer func() {
		_ = reader.Close()
	}()

	{
		// read at the very start of the file
		var buf = make([]byte, 3)
		n, readErr := reader.ReadAt(buf, 0)
		if n > 0 {
			t.Log(string(buf[:n]))
		}
		if readErr != nil && readErr != io.EOF {
			t.Fatal(readErr)
		}
	}

	{
		// read in the middle
		var buf = make([]byte, 3)
		n, readErr := reader.ReadAt(buf, 3)
		if n > 0 {
			t.Log(string(buf[:n]))
		}
		if readErr != nil && readErr != io.EOF {
			t.Fatal(readErr)
		}
	}

	{
		// buffer larger than the remaining data in the block
		var buf = make([]byte, 11)
		n, readErr := reader.ReadAt(buf, 3)
		if n > 0 {
			t.Log(string(buf[:n]))
		}
		if readErr != nil && readErr != io.EOF {
			t.Fatal(readErr)
		}
	}

	{
		// read near the end of the file
		var buf = make([]byte, 3)
		n, readErr := reader.ReadAt(buf, 11)
		if n > 0 {
			t.Log(string(buf[:n]))
		}
		if readErr != nil && readErr != io.EOF {
			t.Fatal(readErr)
		}
	}

	{
		// offset far past EOF must not fail hard
		var buf = make([]byte, 3)
		n, readErr := reader.ReadAt(buf, 1000)
		if n > 0 {
			t.Log(string(buf[:n]))
		} else {
			t.Log("EOF")
		}
		if readErr != nil && readErr != io.EOF {
			t.Fatal(readErr)
		}
	}
}
|
||||
|
||||
func TestFileReader_Pool(t *testing.T) {
|
||||
bFile, openErr := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
|
||||
if openErr != nil {
|
||||
if os.IsNotExist(openErr) {
|
||||
t.Log(openErr)
|
||||
return
|
||||
}
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
reader, err := bFile.OpenFileReader(bfs.Hash("123456"), false)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
err = reader.Close()
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
t.Log(len(bFile.TestReaderPool()))
|
||||
}
|
||||
112
EdgeNode/internal/utils/bfs/file_writer.go
Normal file
112
EdgeNode/internal/utils/bfs/file_writer.go
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import "errors"
|
||||
|
||||
// FileWriter file writer
// not thread-safe
//
// FileWriter writes one logical file (meta, header, body) into a
// BlocksFile. Sizes written so far are tracked so Close can validate
// the result against the declared body size.
type FileWriter struct {
	bFile   *BlocksFile // destination blocks file
	hasMeta bool        // set once WriteMeta has been called
	hash    string      // hash key identifying the file

	bodySize     int64 // declared body size (-1 / 0 when unknown)
	originOffset int64 // next logical body offset to write at

	realHeaderSize int64 // header bytes actually written
	realBodySize   int64 // body bytes actually written
	isPartial      bool  // partial (ranged) file: body may be written at arbitrary offsets
}
|
||||
|
||||
func NewFileWriter(bFile *BlocksFile, hash string, bodySize int64, isPartial bool) (*FileWriter, error) {
|
||||
if isPartial && bodySize <= 0 {
|
||||
return nil, errors.New("invalid body size for partial content")
|
||||
}
|
||||
|
||||
return &FileWriter{
|
||||
bFile: bFile,
|
||||
hash: hash,
|
||||
bodySize: bodySize,
|
||||
isPartial: isPartial,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WriteMeta records the file's metadata (HTTP status, expiry, expected
// total size) in the meta file and marks the writer as having meta,
// which the header/body writers require for non-partial files.
func (this *FileWriter) WriteMeta(status int, expiresAt int64, expectedFileSize int64) error {
	this.hasMeta = true
	return this.bFile.mFile.WriteMeta(this.hash, status, expiresAt, expectedFileSize)
}
|
||||
|
||||
// WriteHeader appends header bytes for this file. For non-partial files
// WriteMeta must have been called first. The -1 offset means the header
// stream has no logical origin offset.
func (this *FileWriter) WriteHeader(b []byte) (n int, err error) {
	if !this.isPartial && !this.hasMeta {
		err = errors.New("no meta found")
		return
	}

	n, err = this.bFile.Write(this.hash, BlockTypeHeader, b, -1)
	this.realHeaderSize += int64(n)
	return
}
|
||||
|
||||
// WriteBody appends body bytes sequentially at the current origin
// offset, advancing both the offset and the running body-size total.
// For non-partial files WriteMeta must have been called first.
func (this *FileWriter) WriteBody(b []byte) (n int, err error) {
	if !this.isPartial && !this.hasMeta {
		err = errors.New("no meta found")
		return
	}

	n, err = this.bFile.Write(this.hash, BlockTypeBody, b, this.originOffset)
	this.originOffset += int64(n)
	this.realBodySize += int64(n)
	return
}
|
||||
|
||||
// WriteBodyAt writes body bytes at an arbitrary logical offset; it is
// only valid for partial files and requires meta to have been written.
// Note that realBodySize is NOT advanced here — Close fills it in from
// the declared bodySize for partial files.
func (this *FileWriter) WriteBodyAt(b []byte, offset int64) (n int, err error) {
	if !this.hasMeta {
		err = errors.New("no meta found")
		return
	}

	if !this.isPartial {
		err = errors.New("can not write body at specified offset: it is not a partial file")
		return
	}

	// still 'Write()' NOT 'WriteAt()': the logical offset is carried as an
	// argument; the blocks file appends physically regardless
	this.originOffset = offset
	n, err = this.bFile.Write(this.hash, BlockTypeBody, b, offset)
	this.originOffset += int64(n)
	return
}
|
||||
|
||||
// Close finalizes the file: it validates the written body size against
// the declared size, records the final sizes in the meta file, syncs
// the blocks file, and always removes this hash from the writing set
// (even on error) so later writers are not blocked.
func (this *FileWriter) Close() error {
	defer func() {
		this.bFile.removeWritingFile(this.hash)
	}()

	if !this.isPartial && !this.hasMeta {
		return errors.New("no meta found")
	}

	if this.isPartial {
		// partial files may be sparse; only overflow is an error,
		// and the declared size becomes the recorded size
		if this.originOffset > this.bodySize {
			return errors.New("unexpected body size")
		}
		this.realBodySize = this.bodySize
	} else {
		// full files must match the declared size exactly (when declared)
		if this.bodySize > 0 && this.bodySize != this.realBodySize {
			return errors.New("unexpected body size")
		}
	}

	err := this.bFile.mFile.WriteClose(this.hash, this.realHeaderSize, this.realBodySize)
	if err != nil {
		return err
	}

	return this.bFile.Sync()
}
|
||||
|
||||
// Discard abandons the file being written by removing its meta record.
// TODO: needs testing (translated from the original Chinese comment).
func (this *FileWriter) Discard() error {
	return this.bFile.mFile.RemoveFile(this.hash)
}
|
||||
134
EdgeNode/internal/utils/bfs/file_writer_test.go
Normal file
134
EdgeNode/internal/utils/bfs/file_writer_test.go
Normal file
@@ -0,0 +1,134 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestNewFileWriter writes a small file (meta, one header, three body
// chunks) and closes it. The blocks file is removed afterwards unless
// running as a single test.
func TestNewFileWriter(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if !testutils.IsSingleTesting() {
			_ = bFile.RemoveAll()
		} else {
			_ = bFile.Close()
		}
	}()

	writer, err := bFile.OpenFileWriter(bfs.Hash("123456"), -1, false)
	if err != nil {
		t.Fatal(err)
	}

	err = writer.WriteMeta(http.StatusOK, fasttime.Now().Unix()+3600, -1)
	if err != nil {
		t.Fatal(err)
	}

	_, err = writer.WriteHeader([]byte("Content-Type: text/html; charset=utf-8"))
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 3; i++ {
		n, writeErr := writer.WriteBody([]byte("Hello,World"))
		if writeErr != nil {
			t.Fatal(writeErr)
		}

		t.Log("wrote:", n, "bytes")
	}

	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
|
||||
// TestNewFileWriter_LargeFile writes many 16 KiB body blocks (1024 in
// single-test mode, 2 otherwise) and logs the elapsed time.
func TestNewFileWriter_LargeFile(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if !testutils.IsSingleTesting() {
			_ = bFile.RemoveAll()
		} else {
			_ = bFile.Close()
		}
	}()

	writer, err := bFile.OpenFileWriter(bfs.Hash("123456@LARGE"), -1, false)
	if err != nil {
		t.Fatal(err)
	}

	err = writer.WriteMeta(http.StatusOK, fasttime.Now().Unix()+86400, -1)
	if err != nil {
		t.Fatal(err)
	}

	var countBlocks = 1 << 10
	if !testutils.IsSingleTesting() {
		countBlocks = 2
	}

	var data = bytes.Repeat([]byte{'A'}, 16<<10)

	var before = time.Now()
	for i := 0; i < countBlocks; i++ {
		_, err = writer.WriteBody(data)
		if err != nil {
			t.Fatal(err)
		}
	}

	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}

	logs.Println("cost:", time.Since(before).Seconds()*1000, "ms")
}
|
||||
|
||||
// TestFileWriter_WriteBodyAt writes body bytes at a non-zero offset of
// a partial file (declared size 1 MiB).
// NOTE(review): the writer is never Closed/Discarded here, so the meta
// is never finalized — confirm this is intentional for the test.
func TestFileWriter_WriteBodyAt(t *testing.T) {
	bFile, err := bfs.OpenBlocksFile("testdata/test.b", bfs.DefaultBlockFileOptions)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if !testutils.IsSingleTesting() {
			_ = bFile.RemoveAll()
		} else {
			_ = bFile.Close()
		}
	}()

	writer, err := bFile.OpenFileWriter(bfs.Hash("123456"), 1<<20, true)
	if err != nil {
		t.Fatal(err)
	}

	{
		n, writeErr := writer.WriteBodyAt([]byte("Hello,World"), 1024)
		if writeErr != nil {
			t.Fatal(writeErr)
		}

		t.Log("wrote:", n, "bytes")
	}
}
|
||||
442
EdgeNode/internal/utils/bfs/fs.go
Normal file
442
EdgeNode/internal/utils/bfs/fs.go
Normal file
@@ -0,0 +1,442 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// IsEnabled reports whether the block-based filesystem can run on the
// current CPU architecture (64-bit amd64/arm64 only).
func IsEnabled() bool {
	switch runtime.GOARCH {
	case "amd64", "arm64":
		return true
	}
	return false
}
|
||||
|
||||
// FS is the filesystem object: it shards files across BlocksFile
// instances keyed by hash prefix, keeps an LRU-ordered list of open
// shards, and syncs dirty shards in the background.
type FS struct {
	dir string     // root directory of the filesystem
	opt *FSOptions // options, normalized via EnsureDefaults

	bMap         map[string]*BlocksFile              // name => *BlocksFile
	bList        *linkedlist.List[string]            // [bName], LRU order (most recent at the tail)
	bItemMap     map[string]*linkedlist.Item[string] // bName => its list item, for O(1) reordering
	closingBMap  map[string]zero.Zero                // filename => Zero; shards currently being closed
	closingBChan chan *BlocksFile                    // queue of shards to close asynchronously

	mu       *sync.RWMutex // guards bMap/bList/bItemMap/closingBMap
	isClosed bool          // NOTE(review): read without holding mu in several methods — confirm benign

	syncTicker *time.Ticker // drives the periodic background sync

	locker *fsutils.Locker // file lock preventing two processes from opening the same dir
}
|
||||
|
||||
// OpenFS opens the filesystem rooted at dir. It rejects unsupported
// architectures, normalizes options (falling back to DefaultFSOptions),
// takes an exclusive directory lock, and starts the background
// sync/close goroutines before returning.
func OpenFS(dir string, options *FSOptions) (*FS, error) {
	if !IsEnabled() {
		return nil, errors.New("the fs only works under 64 bit system")
	}

	if options == nil {
		options = DefaultFSOptions
	} else {
		options.EnsureDefaults()
	}

	// exclusive lock so two processes never share the same data dir
	var locker = fsutils.NewLocker(dir + "/fs")
	err := locker.Lock()
	if err != nil {
		return nil, err
	}

	var fs = &FS{
		dir:          dir,
		bMap:         map[string]*BlocksFile{},
		bList:        linkedlist.NewList[string](),
		bItemMap:     map[string]*linkedlist.Item[string]{},
		closingBMap:  map[string]zero.Zero{},
		closingBChan: make(chan *BlocksFile, 32),
		mu:           &sync.RWMutex{},
		opt:          options,
		syncTicker:   time.NewTicker(1 * time.Second),
		locker:       locker,
	}
	go fs.init()
	return fs, nil
}
|
||||
|
||||
func (this *FS) init() {
|
||||
go func() {
|
||||
// sync in background
|
||||
for range this.syncTicker.C {
|
||||
this.syncLoop()
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
this.processClosingBFiles()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// OpenFileWriter opens a writer for the file identified by hash.
// bodySize may be -1 when unknown, except for partial files which must
// declare their total size up front.
func (this *FS) OpenFileWriter(hash string, bodySize int64, isPartial bool) (*FileWriter, error) {
	if this.isClosed {
		return nil, errors.New("the fs closed")
	}

	if isPartial && bodySize <= 0 {
		return nil, errors.New("invalid body size for partial content")
	}

	bFile, err := this.openBFileForHashWriting(hash)
	if err != nil {
		return nil, err
	}
	return bFile.OpenFileWriter(hash, bodySize, isPartial)
}
|
||||
|
||||
// OpenFileReader opens a reader for the file identified by hash,
// loading (or reusing) the BlocksFile shard the hash maps to.
func (this *FS) OpenFileReader(hash string, isPartial bool) (*FileReader, error) {
	if this.isClosed {
		return nil, errors.New("the fs closed")
	}

	bFile, err := this.openBFileForHashReading(hash)
	if err != nil {
		return nil, err
	}
	return bFile.OpenFileReader(hash, isPartial)
}
|
||||
|
||||
// ExistFile reports whether a file with the given hash exists. Note it
// opens the shard for reading, so a missing shard surfaces as an error
// rather than "not exist".
func (this *FS) ExistFile(hash string) (bool, error) {
	if this.isClosed {
		return false, errors.New("the fs closed")
	}

	bFile, err := this.openBFileForHashReading(hash)
	if err != nil {
		return false, err
	}
	return bFile.ExistFile(hash), nil
}
|
||||
|
||||
// RemoveFile deletes the file identified by hash from its shard. The
// shard is opened in writing mode since removal mutates it.
func (this *FS) RemoveFile(hash string) error {
	if this.isClosed {
		return errors.New("the fs closed")
	}

	bFile, err := this.openBFileForHashWriting(hash)
	if err != nil {
		return err
	}
	return bFile.RemoveFile(hash)
}
|
||||
|
||||
func (this *FS) Close() error {
|
||||
if this.isClosed {
|
||||
return nil
|
||||
}
|
||||
|
||||
this.isClosed = true
|
||||
|
||||
close(this.closingBChan)
|
||||
this.syncTicker.Stop()
|
||||
|
||||
var lastErr error
|
||||
this.mu.Lock()
|
||||
if len(this.bMap) > 0 {
|
||||
var g = goman.NewTaskGroup()
|
||||
for _, bFile := range this.bMap {
|
||||
var bFileCopy = bFile
|
||||
g.Run(func() {
|
||||
err := bFileCopy.Close()
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
g.Wait()
|
||||
}
|
||||
this.mu.Unlock()
|
||||
|
||||
err := this.locker.Release()
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
|
||||
return lastErr
|
||||
}
|
||||
|
||||
// TestBMap exposes the internal shard map for tests only.
func (this *FS) TestBMap() map[string]*BlocksFile {
	return this.bMap
}
|
||||
|
||||
// TestBList exposes the internal LRU list for tests only.
func (this *FS) TestBList() *linkedlist.List[string] {
	return this.bList
}
|
||||
|
||||
// bPathForHash maps a file hash to its shard: path is
// dir/<hash[0:2]>/<hash[2:4]>.b and bName is the first four hash
// characters. The hash is validated first.
func (this *FS) bPathForHash(hash string) (path string, bName string, err error) {
	err = CheckHashErr(hash)
	if err != nil {
		return "", "", err
	}

	return this.dir + "/" + hash[:2] + "/" + hash[2:4] + BFileExt, hash[:4], nil
}
|
||||
|
||||
// syncLoop is one tick of the background sync: it collects up to
// MaxSyncFiles shards whose last sync is older than SyncTimeout
// (under a read lock), then force-syncs them outside the lock,
// skipping shards that started closing in the meantime.
func (this *FS) syncLoop() {
	if this.isClosed {
		return
	}

	if this.opt.SyncTimeout <= 0 {
		return
	}

	var maxSyncFiles = this.opt.MaxSyncFiles
	if maxSyncFiles <= 0 {
		maxSyncFiles = 32
	}

	// collect candidates while holding only the read lock
	var bFiles []*BlocksFile

	this.mu.RLock()
	for _, bFile := range this.bMap {
		if time.Since(bFile.SyncAt()) > this.opt.SyncTimeout {
			bFiles = append(bFiles, bFile)
			maxSyncFiles--
			if maxSyncFiles <= 0 {
				break
			}
		}
	}
	this.mu.RUnlock()

	// sync outside the lock so writers are not blocked
	for _, bFile := range bFiles {
		if bFile.IsClosing() {
			continue
		}

		err := bFile.ForceSync()
		if err != nil {
			// check again: a failure caused by concurrent closing is expected
			if bFile.IsClosing() {
				continue
			}

			// TODO allow a custom logger via options
			log.Println("BFS", "sync failed: "+err.Error())
		}
	}
}
|
||||
|
||||
// openBFileForHashWriting returns the shard (BlocksFile) responsible
// for hash, opening it if needed. On a cache hit the shard is moved to
// the tail of the LRU list.
func (this *FS) openBFileForHashWriting(hash string) (*BlocksFile, error) {
	err := CheckHashErr(hash)
	if err != nil {
		return nil, err
	}

	bPath, bName, err := this.bPathForHash(hash)
	if err != nil {
		return nil, err
	}

	// fast path: shard already open
	this.mu.RLock()
	bFile, ok := this.bMap[bName]
	this.mu.RUnlock()
	if ok {
		// move this shard to the tail of the LRU list (most recently used)
		this.mu.Lock()

		if bFile.IsClosing() {
			// TODO need to wait and re-open (translated from the original comment)
		}

		item, itemOk := this.bItemMap[bName]
		if itemOk {
			this.bList.Remove(item)
			this.bList.Push(item)
		}
		this.mu.Unlock()

		return bFile, nil
	}

	return this.openBFile(bPath, bName)
}
|
||||
|
||||
// openBFileForHashReading returns the shard responsible for hash for
// reading. Unlike the writing variant it first waits for any pending
// close of the same shard to complete, then reuses or opens it, moving
// a reused shard to the LRU tail.
func (this *FS) openBFileForHashReading(hash string) (*BlocksFile, error) {
	err := CheckHashErr(hash)
	if err != nil {
		return nil, err
	}

	bPath, bName, err := this.bPathForHash(hash)
	if err != nil {
		return nil, err
	}

	// wait until a concurrently-closing instance of this shard is gone
	err = this.waitBFile(bPath)
	if err != nil {
		return nil, err
	}

	this.mu.Lock()
	bFile, ok := this.bMap[bName]
	if ok {
		// move this shard to the tail of the LRU list (most recently used)
		item, itemOk := this.bItemMap[bName]
		if itemOk {
			this.bList.Remove(item)
			this.bList.Push(item)
		}
		this.mu.Unlock()
		return bFile, nil
	}

	this.mu.Unlock()

	return this.openBFile(bPath, bName)
}
|
||||
|
||||
// openBFile opens the shard at bPath, registers it under bName, and
// evicts the least-recently-used shards if the open-file limit is
// exceeded. It re-checks the map under the write lock so concurrent
// callers end up sharing one instance.
func (this *FS) openBFile(bPath string, bName string) (*BlocksFile, error) {
	// check closing queue: wait for a pending close of the same shard
	err := this.waitBFile(bPath)
	if err != nil {
		return nil, err
	}

	this.mu.Lock()
	defer this.mu.Unlock()

	// lookup again: another goroutine may have opened it while we waited
	bFile, ok := this.bMap[bName]
	if ok {
		return bFile, nil
	}

	// TODO consider moving OpenBlocksFile (disk I/O) out of the mutex?
	bFile, err = OpenBlocksFile(bPath, &BlockFileOptions{
		BytesPerSync: this.opt.BytesPerSync,
	})
	if err != nil {
		return nil, err
	}

	// hold a reference so the shard cannot be closed while we register it
	bFile.IncrRef()
	defer bFile.DecrRef()

	this.bMap[bName] = bFile

	// add to the LRU list
	var item = linkedlist.NewItem(bName)
	this.bList.Push(item)
	this.bItemMap[bName] = item

	// evict if we exceeded MaxOpenFiles
	if this.bList.Len() > this.opt.MaxOpenFiles {
		this.shiftOpenFiles()
	}

	return bFile, nil
}
|
||||
|
||||
// processClosingBFiles handles one shard from the closing queue:
// it blocks on closingBChan, closes the received shard, and clears its
// entry from closingBMap so waiters in waitBFile can proceed. A nil
// receive (channel closed by FS.Close) returns immediately.
func (this *FS) processClosingBFiles() {
	if this.isClosed {
		return
	}

	var bFile = <-this.closingBChan
	if bFile == nil {
		return
	}

	_ = bFile.Close()

	this.mu.Lock()
	delete(this.closingBMap, bFile.Filename())
	this.mu.Unlock()
}
|
||||
|
||||
// shiftOpenFiles evicts shards beyond the MaxOpenFiles limit: it scans
// the LRU list from the least-recently-used end for closable shards,
// unregisters them, and hands them to the closing queue. The caller
// must hold the write lock.
func (this *FS) shiftOpenFiles() {
	var l = this.bList.Len()
	var count = l - this.opt.MaxOpenFiles
	if count <= 0 {
		return
	}

	// collect closable candidates from the cold end of the list;
	// NOTE(review): the scan is bounded to the first few items
	// (searchCount < 8 && searchCount < l-8), so in one call fewer
	// shards than `count` may be evicted — presumably intentional
	// back-pressure, confirm
	var bNames []string
	var searchCount int
	this.bList.Range(func(item *linkedlist.Item[string]) (goNext bool) {
		searchCount++

		var bName = item.Value
		var bFile = this.bMap[bName]
		if bFile.CanClose() {
			bNames = append(bNames, bName)
			count--
		}
		return count > 0 && searchCount < 8 && searchCount < l-8
	})

	for _, bName := range bNames {
		var bFile = this.bMap[bName]
		var item = this.bItemMap[bName]

		// clean
		delete(this.bMap, bName)
		delete(this.bItemMap, bName)
		this.bList.Remove(item)

		// add to closing queue
		this.closingBMap[bFile.Filename()] = zero.Zero{}

		// MUST run in goroutine: the channel send may block while we hold the lock
		go func(bFile *BlocksFile) {
			// closingBChan may already be closed (fs shut down); sending
			// to a closed channel panics, so swallow that panic
			defer func() {
				recover()
			}()

			this.closingBChan <- bFile
		}(bFile)
	}
}
|
||||
|
||||
// waitBFile blocks until a shard at bPath that is currently in the
// closing queue has finished closing, polling every millisecond for up
// to ~30 seconds before giving up with a timeout error.
// NOTE(review): closingBMap entries are keyed by bFile.Filename() in
// shiftOpenFiles — this assumes Filename() equals bPath, confirm.
func (this *FS) waitBFile(bPath string) error {
	// fast path: not closing at all
	this.mu.RLock()
	_, isClosing := this.closingBMap[bPath]
	this.mu.RUnlock()
	if !isClosing {
		return nil
	}

	var maxWaits = 30_000 // 30s at 1ms per poll
	for {
		this.mu.RLock()
		_, isClosing = this.closingBMap[bPath]
		this.mu.RUnlock()
		if !isClosing {
			break
		}
		time.Sleep(1 * time.Millisecond)
		maxWaits--

		if maxWaits < 0 {
			return errors.New("open blocks file timeout")
		}
	}
	return nil
}
|
||||
47
EdgeNode/internal/utils/bfs/fs_options.go
Normal file
47
EdgeNode/internal/utils/bfs/fs_options.go
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
|
||||
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FSOptions configures an FS instance; zero/negative fields are filled
// in by EnsureDefaults.
type FSOptions struct {
	MaxOpenFiles int           // max simultaneously open BlocksFile shards
	BytesPerSync int64         // bytes written before an implicit sync
	SyncTimeout  time.Duration // max age of unsynced data before the background sync flushes a shard
	MaxSyncFiles int           // max shards flushed per background sync tick
}
|
||||
|
||||
func (this *FSOptions) EnsureDefaults() {
|
||||
if this.MaxOpenFiles <= 0 {
|
||||
// 根据内存计算最大打开文件数
|
||||
var maxOpenFiles = memutils.SystemMemoryGB() * 128
|
||||
if maxOpenFiles > (8 << 10) {
|
||||
maxOpenFiles = 8 << 10
|
||||
}
|
||||
this.MaxOpenFiles = maxOpenFiles
|
||||
}
|
||||
if this.BytesPerSync <= 0 {
|
||||
if fsutils.DiskIsFast() {
|
||||
this.BytesPerSync = 1 << 20 // TODO 根据硬盘实际写入速度进行调整
|
||||
} else {
|
||||
this.BytesPerSync = 512 << 10
|
||||
}
|
||||
}
|
||||
if this.SyncTimeout <= 0 {
|
||||
this.SyncTimeout = 1 * time.Second
|
||||
}
|
||||
if this.MaxSyncFiles <= 0 {
|
||||
this.MaxSyncFiles = 32
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultFSOptions is used by OpenFS when the caller passes nil options.
var DefaultFSOptions = &FSOptions{
	MaxOpenFiles: 1 << 10,
	BytesPerSync: 512 << 10,
	SyncTimeout:  1 * time.Second,
	MaxSyncFiles: 32,
}
|
||||
197
EdgeNode/internal/utils/bfs/fs_test.go
Normal file
197
EdgeNode/internal/utils/bfs/fs_test.go
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
_ "github.com/iwind/TeaGo/bootstrap"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFS_OpenFileWriter writes a complete (meta + body + Close) file
// and then a partial file through the FS facade.
// NOTE(review): the second (partial) writer is never Closed — confirm
// this is intentional.
func TestFS_OpenFileWriter(t *testing.T) {
	fs, openErr := bfs.OpenFS(Tea.Root+"/data/bfs/test", bfs.DefaultFSOptions)
	if openErr != nil {
		t.Fatal(openErr)
	}
	defer func() {
		_ = fs.Close()
	}()

	{
		// full file: meta, body, close
		writer, err := fs.OpenFileWriter(bfs.Hash("123456"), -1, false)
		if err != nil {
			t.Fatal(err)
		}

		err = writer.WriteMeta(200, fasttime.Now().Unix()+3600, -1)
		if err != nil {
			t.Fatal(err)
		}

		_, err = writer.WriteBody([]byte("Hello, World"))
		if err != nil {
			t.Fatal(err)
		}

		err = writer.Close()
		if err != nil {
			t.Fatal(err)
		}
	}

	{
		// partial file with declared size, body only
		writer, err := fs.OpenFileWriter(bfs.Hash("654321"), 100, true)
		if err != nil {
			t.Fatal(err)
		}

		_, err = writer.WriteBody([]byte("Hello, World"))
		if err != nil {
			t.Fatal(err)
		}
	}
}
|
||||
|
||||
func TestFS_OpenFileReader(t *testing.T) {
|
||||
fs, openErr := bfs.OpenFS(Tea.Root+"/data/bfs/test", bfs.DefaultFSOptions)
|
||||
if openErr != nil {
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
defer func() {
|
||||
_ = fs.Close()
|
||||
}()
|
||||
|
||||
reader, err := fs.OpenFileReader(bfs.Hash("123456"), false)
|
||||
if err != nil {
|
||||
if bfs.IsNotExist(err) {
|
||||
t.Log(err)
|
||||
return
|
||||
}
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(string(data))
|
||||
logs.PrintAsJSON(reader.FileHeader(), t)
|
||||
}
|
||||
|
||||
func TestFS_ExistFile(t *testing.T) {
|
||||
fs, openErr := bfs.OpenFS(Tea.Root+"/data/bfs/test", bfs.DefaultFSOptions)
|
||||
if openErr != nil {
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
defer func() {
|
||||
_ = fs.Close()
|
||||
}()
|
||||
|
||||
exist, err := fs.ExistFile(bfs.Hash("123456"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("exist:", exist)
|
||||
}
|
||||
|
||||
func TestFS_RemoveFile(t *testing.T) {
|
||||
fs, openErr := bfs.OpenFS(Tea.Root+"/data/bfs/test", bfs.DefaultFSOptions)
|
||||
if openErr != nil {
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
defer func() {
|
||||
_ = fs.Close()
|
||||
}()
|
||||
|
||||
var hash = bfs.Hash("123456")
|
||||
err := fs.RemoveFile(hash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
exist, err := fs.ExistFile(bfs.Hash("123456"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("exist:", exist)
|
||||
}
|
||||
|
||||
func TestFS_OpenFileWriter_Close(t *testing.T) {
|
||||
if !testutils.IsSingleTesting() {
|
||||
return
|
||||
}
|
||||
|
||||
fs, openErr := bfs.OpenFS(Tea.Root+"/data/bfs/test", &bfs.FSOptions{
|
||||
MaxOpenFiles: 99,
|
||||
})
|
||||
if openErr != nil {
|
||||
t.Fatal(openErr)
|
||||
}
|
||||
defer func() {
|
||||
_ = fs.Close()
|
||||
}()
|
||||
|
||||
var count = 2
|
||||
if testutils.IsSingleTesting() {
|
||||
count = 100
|
||||
}
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
//t.Log("open", i)
|
||||
writer, err := fs.OpenFileWriter(bfs.Hash(types.String(i)), -1, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_ = writer.Close()
|
||||
}
|
||||
|
||||
t.Log(len(fs.TestBMap()), "block files, pid:", os.Getpid())
|
||||
|
||||
var p = func() {
|
||||
var bNames []string
|
||||
fs.TestBList().Range(func(item *linkedlist.Item[string]) (goNext bool) {
|
||||
bNames = append(bNames, item.Value)
|
||||
return true
|
||||
})
|
||||
|
||||
if len(bNames) != len(fs.TestBMap()) {
|
||||
t.Fatal("len(bNames)!=len(bMap)")
|
||||
}
|
||||
|
||||
if len(bNames) < 10 {
|
||||
t.Log("["+types.String(len(bNames))+"]", bNames)
|
||||
} else {
|
||||
t.Log("["+types.String(len(bNames))+"]", bNames[:10], "...")
|
||||
}
|
||||
}
|
||||
|
||||
p()
|
||||
|
||||
{
|
||||
writer, err := fs.OpenFileWriter(bfs.Hash(types.String(10)), -1, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_ = writer.Close()
|
||||
}
|
||||
|
||||
p()
|
||||
|
||||
// testing closing
|
||||
for i := 0; i < 3; i++ {
|
||||
writer, err := fs.OpenFileWriter(bfs.Hash(types.String(0)), -1, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_ = writer.Close()
|
||||
}
|
||||
|
||||
p()
|
||||
}
|
||||
66
EdgeNode/internal/utils/bfs/gzip_reader_pool.go
Normal file
66
EdgeNode/internal/utils/bfs/gzip_reader_pool.go
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/percpu"
|
||||
"github.com/klauspost/compress/gzip"
|
||||
"io"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var SharedDecompressPool = NewGzipReaderPool()
|
||||
|
||||
type GzipReaderPool struct {
|
||||
c chan *gzip.Reader
|
||||
cList []chan *gzip.Reader
|
||||
}
|
||||
|
||||
func NewGzipReaderPool() *GzipReaderPool {
|
||||
const poolSize = 16
|
||||
|
||||
var countProcs = runtime.GOMAXPROCS(0)
|
||||
if countProcs <= 0 {
|
||||
countProcs = runtime.NumCPU()
|
||||
}
|
||||
countProcs *= 4
|
||||
|
||||
var cList []chan *gzip.Reader
|
||||
for i := 0; i < countProcs; i++ {
|
||||
cList = append(cList, make(chan *gzip.Reader, poolSize))
|
||||
}
|
||||
|
||||
return &GzipReaderPool{
|
||||
c: make(chan *gzip.Reader, poolSize),
|
||||
cList: cList,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipReaderPool) Get(rawReader io.Reader) (*gzip.Reader, error) {
|
||||
select {
|
||||
case w := <-this.getC():
|
||||
err := w.Reset(rawReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w, nil
|
||||
default:
|
||||
return gzip.NewReader(rawReader)
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipReaderPool) Put(reader *gzip.Reader) {
|
||||
select {
|
||||
case this.getC() <- reader:
|
||||
default:
|
||||
// 不需要close,因为已经在使用的时候调用了
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipReaderPool) getC() chan *gzip.Reader {
|
||||
var procId = percpu.GetProcId()
|
||||
if procId < len(this.cList) {
|
||||
return this.cList[procId]
|
||||
}
|
||||
return this.c
|
||||
}
|
||||
63
EdgeNode/internal/utils/bfs/gzip_writer_pool.go
Normal file
63
EdgeNode/internal/utils/bfs/gzip_writer_pool.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/percpu"
|
||||
"github.com/klauspost/compress/gzip"
|
||||
"io"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
var SharedCompressPool = NewGzipWriterPool()
|
||||
|
||||
type GzipWriterPool struct {
|
||||
c chan *gzip.Writer
|
||||
cList []chan *gzip.Writer
|
||||
}
|
||||
|
||||
func NewGzipWriterPool() *GzipWriterPool {
|
||||
const poolSize = 16
|
||||
|
||||
var countProcs = runtime.GOMAXPROCS(0)
|
||||
if countProcs <= 0 {
|
||||
countProcs = runtime.NumCPU()
|
||||
}
|
||||
countProcs *= 4
|
||||
|
||||
var cList []chan *gzip.Writer
|
||||
for i := 0; i < countProcs; i++ {
|
||||
cList = append(cList, make(chan *gzip.Writer, poolSize))
|
||||
}
|
||||
|
||||
return &GzipWriterPool{
|
||||
c: make(chan *gzip.Writer, poolSize),
|
||||
cList: cList,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipWriterPool) Get(rawWriter io.Writer) (*gzip.Writer, error) {
|
||||
select {
|
||||
case w := <-this.getC():
|
||||
w.Reset(rawWriter)
|
||||
return w, nil
|
||||
default:
|
||||
return gzip.NewWriterLevel(rawWriter, gzip.BestSpeed)
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipWriterPool) Put(writer *gzip.Writer) {
|
||||
select {
|
||||
case this.getC() <- writer:
|
||||
default:
|
||||
// 不需要close,因为已经在使用的时候调用了
|
||||
}
|
||||
}
|
||||
|
||||
func (this *GzipWriterPool) getC() chan *gzip.Writer {
|
||||
var procId = percpu.GetProcId()
|
||||
if procId < len(this.cList) {
|
||||
return this.cList[procId]
|
||||
}
|
||||
return this.c
|
||||
}
|
||||
36
EdgeNode/internal/utils/bfs/hash.go
Normal file
36
EdgeNode/internal/utils/bfs/hash.go
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
stringutil "github.com/iwind/TeaGo/utils/string"
|
||||
)
|
||||
|
||||
// HashLen is the required length of a hash string (hex-encoded MD5).
var HashLen = 32

// CheckHash reports whether hash is a lowercase hexadecimal string of
// exactly HashLen characters.
func CheckHash(hash string) bool {
	if len(hash) != HashLen {
		return false
	}

	for i := 0; i < len(hash); i++ {
		c := hash[i]
		isDigit := c >= '0' && c <= '9'
		isLowerHex := c >= 'a' && c <= 'f'
		if !isDigit && !isLowerHex {
			return false
		}
	}

	return true
}
|
||||
|
||||
func CheckHashErr(hash string) error {
|
||||
if CheckHash(hash) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("check hash '%s' failed: %w", hash, ErrInvalidHash)
|
||||
}
|
||||
|
||||
// Hash converts an arbitrary key into its canonical hash form (hex MD5).
func Hash(s string) string {
	return stringutil.Md5(s)
}
|
||||
27
EdgeNode/internal/utils/bfs/hash_test.go
Normal file
27
EdgeNode/internal/utils/bfs/hash_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCheckHash exercises format validation: wrong length, wrong case,
// valid lowercase hex, and a real generated hash.
func TestCheckHash(t *testing.T) {
	var a = assert.NewAssertion(t)

	a.IsFalse(bfs.CheckHash("123456"))
	a.IsFalse(bfs.CheckHash(strings.Repeat("A", 32)))
	a.IsTrue(bfs.CheckHash(strings.Repeat("a", 32)))
	a.IsTrue(bfs.CheckHash(bfs.Hash("123456")))
}
|
||||
|
||||
func BenchmarkCheckHashErr(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = bfs.CheckHash(bfs.Hash(strconv.Itoa(rand.Int())))
|
||||
}
|
||||
}
|
||||
52
EdgeNode/internal/utils/bfs/meta_block.go
Normal file
52
EdgeNode/internal/utils/bfs/meta_block.go
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// MetaAction identifies the kind of operation recorded in a meta block.
type MetaAction = byte

const (
	MetaActionNew    MetaAction = '+' // add/update a file header
	MetaActionRemove MetaAction = '-' // remove a file header
)
|
||||
|
||||
func EncodeMetaBlock(action MetaAction, hash string, data []byte) ([]byte, error) {
|
||||
var hl = len(hash)
|
||||
if hl != HashLen {
|
||||
return nil, errors.New("invalid hash length")
|
||||
}
|
||||
|
||||
var l = 1 /** Action **/ + hl /** Hash **/ + len(data)
|
||||
|
||||
var b = make([]byte, 4 /** Len **/ +l)
|
||||
binary.BigEndian.PutUint32(b, uint32(l))
|
||||
b[4] = action
|
||||
copy(b[5:], hash)
|
||||
copy(b[5+hl:], data)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func DecodeMetaBlock(blockBytes []byte) (action MetaAction, hash string, data []byte, err error) {
|
||||
var dataOffset = 4 /** Len **/ + HashLen + 1 /** Action **/
|
||||
if len(blockBytes) < dataOffset {
|
||||
err = errors.New("decode failed: invalid block data")
|
||||
return
|
||||
}
|
||||
|
||||
action = blockBytes[4]
|
||||
hash = string(blockBytes[5 : 5+HashLen])
|
||||
|
||||
if action == MetaActionNew {
|
||||
var rawData = blockBytes[dataOffset:]
|
||||
if len(rawData) > 0 {
|
||||
data = make([]byte, len(rawData))
|
||||
copy(data, rawData)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
52
EdgeNode/internal/utils/bfs/meta_block_test.go
Normal file
52
EdgeNode/internal/utils/bfs/meta_block_test.go
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMetaBlock(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
{
|
||||
var srcHash = bfs.Hash("a")
|
||||
b, err := bfs.EncodeMetaBlock(bfs.MetaActionNew, srcHash, []byte{1, 2, 3})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(b)
|
||||
|
||||
{
|
||||
action, hash, data, decodeErr := bfs.DecodeMetaBlock(b)
|
||||
if decodeErr != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a.IsTrue(action == bfs.MetaActionNew)
|
||||
a.IsTrue(hash == srcHash)
|
||||
a.IsTrue(bytes.Equal(data, []byte{1, 2, 3}))
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
var srcHash = bfs.Hash("bcd")
|
||||
|
||||
b, err := bfs.EncodeMetaBlock(bfs.MetaActionRemove, srcHash, []byte{1, 2, 3})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(b)
|
||||
{
|
||||
action, hash, data, decodeErr := bfs.DecodeMetaBlock(b)
|
||||
if decodeErr != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a.IsTrue(action == bfs.MetaActionRemove)
|
||||
a.IsTrue(hash == srcHash)
|
||||
a.IsTrue(len(data) == 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
380
EdgeNode/internal/utils/bfs/meta_file.go
Normal file
380
EdgeNode/internal/utils/bfs/meta_file.go
Normal file
@@ -0,0 +1,380 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MFileExt is the filename extension for meta files.
const MFileExt = ".m"

// Version1 is the first (current) file header format version.
const Version1 = 1
|
||||
|
||||
// MetaFile is an append-only log of file headers: every change appends an
// encoded "new"/"remove" block, and the full state is replayed into
// headerMap when the file is opened.
type MetaFile struct {
	fp        *os.File
	filename  string
	headerMap map[string]*LazyFileHeader // hash => *LazyFileHeader
	mu        *sync.RWMutex              // TODO consider a dedicated lock instead of sharing with bFile?

	isModified      bool                 // true while fp has writes not yet fsync'ed
	modifiedHashMap map[string]zero.Zero // hashes touched since the last sync
}
|
||||
|
||||
func OpenMetaFile(filename string, mu *sync.RWMutex) (*MetaFile, error) {
|
||||
fp, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mFile = &MetaFile{
|
||||
filename: filename,
|
||||
fp: fp,
|
||||
headerMap: map[string]*LazyFileHeader{},
|
||||
mu: mu,
|
||||
modifiedHashMap: map[string]zero.Zero{},
|
||||
}
|
||||
|
||||
// 从文件中加载已有的文件头信息
|
||||
err = mFile.load()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mFile, nil
|
||||
}
|
||||
|
||||
// load replays the whole meta log from the start of the file: a sequence of
// length-prefixed blocks (see EncodeMetaBlock) applied in order, so later
// entries win over earlier ones.
func (this *MetaFile) load() error {
	AckReadThread()
	_, err := this.fp.Seek(0, io.SeekStart)
	ReleaseReadThread()
	if err != nil {
		return err
	}

	// TODO check whether the file is complete

	var buf = make([]byte, 4<<10)
	var blockBytes []byte
	for {
		AckReadThread()
		n, readErr := this.fp.Read(buf)
		ReleaseReadThread()
		if n > 0 {
			blockBytes = append(blockBytes, buf[:n]...)
			// consume every complete block currently buffered
			for len(blockBytes) > 4 {
				var l = int(binary.BigEndian.Uint32(blockBytes[:4])) + 4 /* Len **/
				if len(blockBytes) < l {
					// partial block: wait for the next read to complete it
					break
				}

				action, hash, data, decodeErr := DecodeMetaBlock(blockBytes[:l])
				if decodeErr != nil {
					return decodeErr
				}

				switch action {
				case MetaActionNew:
					this.headerMap[hash] = NewLazyFileHeaderFromData(data)
				case MetaActionRemove:
					delete(this.headerMap, hash)
				}

				blockBytes = blockBytes[l:]
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			return readErr
		}
	}

	return nil
}
|
||||
|
||||
func (this *MetaFile) WriteMeta(hash string, status int, expiresAt int64, expectedFileSize int64) error {
|
||||
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
this.headerMap[hash] = NewLazyFileHeader(&FileHeader{
|
||||
Version: Version1,
|
||||
ExpiresAt: expiresAt,
|
||||
Status: status,
|
||||
ExpiredBodySize: expectedFileSize,
|
||||
IsWriting: true,
|
||||
})
|
||||
|
||||
this.modifiedHashMap[hash] = zero.Zero{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MetaFile) WriteHeaderBlockUnsafe(hash string, bOffsetFrom int64, bOffsetTo int64) error {
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
header, err := lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO 合并相邻block
|
||||
header.HeaderBlocks = append(header.HeaderBlocks, BlockInfo{
|
||||
BFileOffsetFrom: bOffsetFrom,
|
||||
BFileOffsetTo: bOffsetTo,
|
||||
})
|
||||
|
||||
this.modifiedHashMap[hash] = zero.Zero{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MetaFile) WriteBodyBlockUnsafe(hash string, bOffsetFrom int64, bOffsetTo int64, originOffsetFrom int64, originOffsetTo int64) error {
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
header, err := lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO 合并相邻block
|
||||
header.BodyBlocks = append(header.BodyBlocks, BlockInfo{
|
||||
OriginOffsetFrom: originOffsetFrom,
|
||||
OriginOffsetTo: originOffsetTo,
|
||||
BFileOffsetFrom: bOffsetFrom,
|
||||
BFileOffsetTo: bOffsetTo,
|
||||
})
|
||||
|
||||
this.modifiedHashMap[hash] = zero.Zero{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MetaFile) WriteClose(hash string, headerSize int64, bodySize int64) error {
|
||||
// TODO 考虑单个hash多次重复调用的情况
|
||||
|
||||
this.mu.Lock()
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
if !ok {
|
||||
this.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
header, err := lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
this.mu.Unlock()
|
||||
|
||||
// TODO 检查bodySize和expectedBodySize是否一致,如果不一致则从headerMap中删除
|
||||
|
||||
header.ModifiedAt = fasttime.Now().Unix()
|
||||
header.HeaderSize = headerSize
|
||||
header.BodySize = bodySize
|
||||
header.Compact()
|
||||
|
||||
blockBytes, err := header.Encode(hash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
AckReadThread()
|
||||
_, err = this.fp.Seek(0, io.SeekEnd)
|
||||
ReleaseReadThread()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
AckWriteThread()
|
||||
_, err = this.fp.Write(blockBytes)
|
||||
ReleaseWriteThread()
|
||||
|
||||
this.isModified = true
|
||||
return err
|
||||
}
|
||||
|
||||
func (this *MetaFile) RemoveFile(hash string) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
_, ok := this.headerMap[hash]
|
||||
if ok {
|
||||
delete(this.headerMap, hash)
|
||||
}
|
||||
|
||||
if ok {
|
||||
blockBytes, err := EncodeMetaBlock(MetaActionRemove, hash, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
AckWriteThread()
|
||||
_, err = this.fp.Write(blockBytes)
|
||||
ReleaseWriteThread()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.isModified = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *MetaFile) FileHeader(hash string) (header *FileHeader, ok bool) {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
|
||||
if ok {
|
||||
var err error
|
||||
header, err = lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (this *MetaFile) FileHeaderUnsafe(hash string) (header *FileHeader, ok bool) {
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
|
||||
if ok {
|
||||
var err error
|
||||
header, err = lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (this *MetaFile) CloneFileHeader(hash string) (header *FileHeader, ok bool) {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
lazyHeader, ok := this.headerMap[hash]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
header, err = lazyHeader.FileHeaderUnsafe()
|
||||
if err != nil {
|
||||
ok = false
|
||||
return
|
||||
}
|
||||
|
||||
header = header.Clone()
|
||||
return
|
||||
}
|
||||
|
||||
// FileHeaders returns the internal hash => header map.
// NOTE(review): the map itself escapes the read lock here — callers that
// iterate it after this returns race with concurrent writers; confirm all
// call sites hold mu, or switch to returning a copy.
func (this *MetaFile) FileHeaders() map[string]*LazyFileHeader {
	this.mu.RLock()
	defer this.mu.RUnlock()
	return this.headerMap
}
|
||||
|
||||
func (this *MetaFile) ExistFile(hash string) bool {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
_, ok := this.headerMap[hash]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Compact rewrites the meta log so it contains exactly one "new" block per
// live hash, dropping every superseded or removed entry.
// TODO consider triggering compaction automatically (dirty-data ratio?)
func (this *MetaFile) Compact() error {
	this.mu.Lock()
	defer this.mu.Unlock()

	// re-encode every live header into a single buffer
	var buf = bytes.NewBuffer(nil)
	for hash, lazyHeader := range this.headerMap {
		header, err := lazyHeader.FileHeaderUnsafe()
		if err != nil {
			return err
		}

		blockBytes, err := header.Encode(hash)
		if err != nil {
			return err
		}
		buf.Write(blockBytes)
	}

	// shrink the file to the new size, rewind, then overwrite in place
	AckWriteThread()
	err := this.fp.Truncate(int64(buf.Len()))
	ReleaseWriteThread()
	if err != nil {
		return err
	}

	AckReadThread()
	_, err = this.fp.Seek(0, io.SeekStart)
	ReleaseReadThread()
	if err != nil {
		return err
	}

	AckWriteThread()
	_, err = this.fp.Write(buf.Bytes())
	ReleaseWriteThread()
	this.isModified = true
	return err
}
|
||||
|
||||
// SyncUnsafe flushes pending writes to disk and then clears the IsWriting
// flag on every header touched since the last sync. Caller must hold the
// lock. No-op when nothing was modified.
func (this *MetaFile) SyncUnsafe() error {
	if !this.isModified {
		return nil
	}

	AckWriteThread()
	err := this.fp.Sync()
	ReleaseWriteThread()
	if err != nil {
		return err
	}

	// the data is durable now: mark recently-modified headers as fully written
	for hash := range this.modifiedHashMap {
		lazyHeader, ok := this.headerMap[hash]
		if ok {
			header, decodeErr := lazyHeader.FileHeaderUnsafe()
			if decodeErr != nil {
				return decodeErr
			}
			header.IsWriting = false
		}
	}

	this.isModified = false
	this.modifiedHashMap = map[string]zero.Zero{}
	return nil
}
|
||||
|
||||
// Close closes the underlying meta file descriptor.
func (this *MetaFile) Close() error {
	return this.fp.Close()
}
|
||||
|
||||
// RemoveAll closes the meta file and deletes it from disk.
func (this *MetaFile) RemoveAll() error {
	_ = this.fp.Close() // best-effort: the file is being deleted anyway
	return os.Remove(this.fp.Name())
}
|
||||
196
EdgeNode/internal/utils/bfs/meta_file_test.go
Normal file
196
EdgeNode/internal/utils/bfs/meta_file_test.go
Normal file
@@ -0,0 +1,196 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bfs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestNewMetaFile opens a meta file and prints the header for a known hash.
func TestNewMetaFile(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mFile.Close()
	}()

	var header, _ = mFile.FileHeader(bfs.Hash("123456"))
	logs.PrintAsJSON(header, t)
	//logs.PrintAsJSON(mFile.Headers(), t)
}

// TestNewMetaFile_Large measures the cost of repeatedly opening (replaying)
// the same meta file.
func TestNewMetaFile_Large(t *testing.T) {
	var count = 2

	if testutils.IsSingleTesting() {
		count = 100
	}

	var before = time.Now()
	for i := 0; i < count; i++ {
		mFile, err := bfs.OpenMetaFile("testdata/test2.m", &sync.RWMutex{})
		if err != nil {
			if bfs.IsNotExist(err) {
				continue
			}
			t.Fatal(err)
		}
		_ = mFile.Close()
	}
	var costMs = time.Since(before).Seconds() * 1000
	t.Logf("cost: %.2fms, qps: %.2fms/file", costMs, costMs/float64(count))
}

// TestNewMetaFile_Memory measures heap growth from keeping many replayed
// meta files in memory (mFiles keeps them alive for the measurement).
func TestNewMetaFile_Memory(t *testing.T) {
	var count = 2

	if testutils.IsSingleTesting() {
		count = 100
	}

	var stat1 = testutils.ReadMemoryStat()

	var mFiles []*bfs.MetaFile

	for i := 0; i < count; i++ {
		mFile, err := bfs.OpenMetaFile("testdata/test2.m", &sync.RWMutex{})
		if err != nil {
			if bfs.IsNotExist(err) {
				continue
			}
			t.Fatal(err)
		}

		_ = mFile.Close()
		mFiles = append(mFiles, mFile)
	}

	var stat2 = testutils.ReadMemoryStat()
	t.Log((stat2.HeapInuse-stat1.HeapInuse)>>20, "MiB")
}

// TestMetaFile_FileHeaders lists every header stored in the meta file.
func TestMetaFile_FileHeaders(t *testing.T) {
	mFile, openErr := bfs.OpenMetaFile("testdata/test2.m", &sync.RWMutex{})
	if openErr != nil {
		if bfs.IsNotExist(openErr) {
			return
		}
		t.Fatal(openErr)
	}
	_ = mFile.Close()
	for hash, lazyHeader := range mFile.FileHeaders() {
		header, err := lazyHeader.FileHeaderUnsafe()
		if err != nil {
			t.Fatal(err)
		}
		t.Log(hash, header.ModifiedAt, header.BodySize)
	}
}

// TestMetaFile_WriteMeta exercises the full write sequence:
// meta -> header block -> body blocks -> close.
func TestMetaFile_WriteMeta(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mFile.Close()
	}()

	var hash = bfs.Hash("123456")
	err = mFile.WriteMeta(hash, 200, fasttime.Now().Unix()+3600, -1)
	if err != nil {
		t.Fatal(err)
	}

	err = mFile.WriteHeaderBlockUnsafe(hash, 123, 223)
	if err != nil {
		t.Fatal(err)
	}

	err = mFile.WriteBodyBlockUnsafe(hash, 223, 323, 0, 100)
	if err != nil {
		t.Fatal(err)
	}

	err = mFile.WriteBodyBlockUnsafe(hash, 323, 423, 100, 200)
	if err != nil {
		t.Fatal(err)
	}

	err = mFile.WriteClose(hash, 100, 200)
	if err != nil {
		t.Fatal(err)
	}

	//logs.PrintAsJSON(mFile.Header(hash), t)
}

// TestMetaFile_Write writes a body block without a prior WriteMeta
// (the block should be ignored for an unknown hash) and closes.
func TestMetaFile_Write(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mFile.Close()
	}()

	var hash = bfs.Hash("123456")

	err = mFile.WriteBodyBlockUnsafe(hash, 0, 100, 0, 100)
	if err != nil {
		t.Fatal(err)
	}

	err = mFile.WriteClose(hash, 0, 100)
	if err != nil {
		t.Fatal(err)
	}
}

// TestMetaFile_RemoveFile removes one entry from the meta file.
func TestMetaFile_RemoveFile(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mFile.Close()
	}()

	err = mFile.RemoveFile(bfs.Hash("123456"))
	if err != nil {
		t.Fatal(err)
	}
}

// TestMetaFile_Compact rewrites the meta log keeping only live entries.
func TestMetaFile_Compact(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = mFile.Close()
	}()

	err = mFile.Compact()
	if err != nil {
		t.Fatal(err)
	}
}

// TestMetaFile_RemoveAll deletes the meta file from disk.
func TestMetaFile_RemoveAll(t *testing.T) {
	mFile, err := bfs.OpenMetaFile("testdata/test.m", &sync.RWMutex{})
	if err != nil {
		t.Fatal(err)
	}
	err = mFile.RemoveAll()
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
25
EdgeNode/internal/utils/bfs/threads_limiter.go
Normal file
25
EdgeNode/internal/utils/bfs/threads_limiter.go
Normal file
@@ -0,0 +1,25 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package bfs
|
||||
|
||||
import "github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
|
||||
// TODO the thread counts could be adjusted dynamically based on the number of disks?
var readThreadsLimiter = make(chan zero.Zero, 8)
var writeThreadsLimiter = make(chan zero.Zero, 8)

// AckReadThread acquires a read slot, blocking while all 8 are in use.
func AckReadThread() {
	readThreadsLimiter <- zero.Zero{}
}

// ReleaseReadThread returns a read slot acquired with AckReadThread.
func ReleaseReadThread() {
	<-readThreadsLimiter
}

// AckWriteThread acquires a write slot, blocking while all 8 are in use.
func AckWriteThread() {
	writeThreadsLimiter <- zero.Zero{}
}

// ReleaseWriteThread returns a write slot acquired with AckWriteThread.
func ReleaseWriteThread() {
	<-writeThreadsLimiter
}
|
||||
40
EdgeNode/internal/utils/buffer_pool.go
Normal file
40
EdgeNode/internal/utils/buffer_pool.go
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SharedBufferPool is the process-wide shared buffer pool.
var SharedBufferPool = NewBufferPool()

// BufferPool recycles *bytes.Buffer values to reduce allocations.
type BufferPool struct {
	rawPool *sync.Pool
}

// NewBufferPool creates an empty buffer pool.
func NewBufferPool() *BufferPool {
	return &BufferPool{
		rawPool: &sync.Pool{
			New: func() any {
				return &bytes.Buffer{}
			},
		},
	}
}

// Get returns an empty buffer, reusing a pooled one when available.
func (this *BufferPool) Get() (b *bytes.Buffer) {
	var buffer = this.rawPool.Get().(*bytes.Buffer)
	// reset lazily: only buffers that came back with content need it
	if buffer.Len() > 0 {
		buffer.Reset()
	}
	return buffer
}

// Put returns a used buffer to the pool for later reuse.
func (this *BufferPool) Put(b *bytes.Buffer) {
	this.rawPool.Put(b)
}
|
||||
73
EdgeNode/internal/utils/buffer_pool_test.go
Normal file
73
EdgeNode/internal/utils/buffer_pool_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewBufferPool shows that a buffer keeps its content after Put, but
// comes back empty from the next Get.
func TestNewBufferPool(t *testing.T) {
	var pool = utils.NewBufferPool()
	var b = pool.Get()
	b.WriteString("Hello, World")
	t.Log(b.String())

	pool.Put(b)
	t.Log(b.String())

	b = pool.Get()
	t.Log(b.String())
}

// BenchmarkNewBufferPool1 is the baseline: a fresh bytes.Buffer per iteration.
func BenchmarkNewBufferPool1(b *testing.B) {
	var data = []byte(strings.Repeat("Hello", 1024))

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buffer = &bytes.Buffer{}
			buffer.Write(data)
		}
	})
}

// BenchmarkNewBufferPool2 reuses pooled buffers with []byte writes.
func BenchmarkNewBufferPool2(b *testing.B) {
	var pool = utils.NewBufferPool()
	var data = []byte(strings.Repeat("Hello", 1024))

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buffer = pool.Get()
			buffer.Write(data)
			pool.Put(buffer)
		}
	})
}

// BenchmarkNewBufferPool3 adds a per-iteration string->[]byte conversion cost.
func BenchmarkNewBufferPool3(b *testing.B) {
	var pool = utils.NewBufferPool()
	var dataString = strings.Repeat("Hello", 1024)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buffer = pool.Get()
			buffer.Write([]byte(dataString))
			pool.Put(buffer)
		}
	})
}

// BenchmarkNewBufferPool4 uses WriteString, avoiding the conversion copy.
func BenchmarkNewBufferPool4(b *testing.B) {
	var pool = utils.NewBufferPool()
	var dataString = strings.Repeat("Hello", 1024)

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buffer = pool.Get()
			buffer.WriteString(dataString)
			pool.Put(buffer)
		}
	})
}
|
||||
28
EdgeNode/internal/utils/byte/utils.go
Normal file
28
EdgeNode/internal/utils/byte/utils.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package byteutils
|
||||
|
||||
// Copy returns a freshly-allocated copy of b, never aliasing the input.
// Empty input yields a non-nil empty slice.
func Copy(b []byte) []byte {
	if len(b) == 0 {
		return []byte{}
	}
	return append(make([]byte, 0, len(b)), b...)
}

// Append returns a new slice holding b followed by b2, without mutating
// b's backing array.
func Append(b []byte, b2 ...byte) []byte {
	var out = Copy(b)
	return append(out, b2...)
}

// Concat returns a new slice holding b followed by every slice in b2,
// without mutating b's backing array.
func Concat(b []byte, b2 ...[]byte) []byte {
	var out = Copy(b)
	for _, extra := range b2 {
		out = append(out, extra...)
	}
	return out
}
|
||||
56
EdgeNode/internal/utils/byte/utils_test.go
Normal file
56
EdgeNode/internal/utils/byte/utils_test.go
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package byteutils_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
byteutils "github.com/TeaOSLab/EdgeNode/internal/utils/byte"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCopy verifies that Copy duplicates the input bytes exactly.
func TestCopy(t *testing.T) {
	var a = assert.NewAssertion(t)

	var prefix []byte
	prefix = append(prefix, 1, 2, 3)
	t.Log(prefix, byteutils.Copy(prefix))
	a.IsTrue(bytes.Equal(byteutils.Copy(prefix), []byte{1, 2, 3}))
}

// TestAppend verifies that Append never shares backing storage with its
// input: two Append calls on the same prefix must not clobber each other.
func TestAppend(t *testing.T) {
	var as = assert.NewAssertion(t)

	var prefix []byte
	prefix = append(prefix, 1, 2, 3)

	// [1 2 3 4 5 6] [1 2 3 7]
	var a = byteutils.Append(prefix, 4, 5, 6)
	var b = byteutils.Append(prefix, 7)
	t.Log(a, b)

	as.IsTrue(bytes.Equal(a, []byte{1, 2, 3, 4, 5, 6}))
	as.IsTrue(bytes.Equal(b, []byte{1, 2, 3, 7}))
}

// TestConcat verifies that Concat joins the prefix with every extra slice
// in order.
func TestConcat(t *testing.T) {
	var a = assert.NewAssertion(t)

	var prefix []byte
	prefix = append(prefix, 1, 2, 3)

	var b = byteutils.Concat(prefix, []byte{4, 5, 6}, []byte{7})
	t.Log(b)

	a.IsTrue(bytes.Equal(b, []byte{1, 2, 3, 4, 5, 6, 7}))
}

// TestAppend_Raw demonstrates (by logging only, no assertions) why the
// raw builtin append is unsafe here: both appends may reuse prefix's
// backing array when it has spare capacity, so the second append can
// overwrite the first result — the expected log shows [1 2 3 7 5 6].
func TestAppend_Raw(t *testing.T) {
	var prefix []byte
	prefix = append(prefix, 1, 2, 3)

	// [1 2 3 7 5 6] [1 2 3 7]
	var a = append(prefix, 4, 5, 6)
	var b = append(prefix, 7)
	t.Log(a, b)
}
|
||||
52
EdgeNode/internal/utils/bytepool/byte_pool.go
Normal file
52
EdgeNode/internal/utils/bytepool/byte_pool.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package bytepool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Shared pools for the common buffer sizes used across the node.
var Pool1k = NewPool(1 << 10)
var Pool4k = NewPool(4 << 10)
var Pool16k = NewPool(16 << 10)
var Pool32k = NewPool(32 << 10)

// Buf wraps a byte slice in a pointer type so it can be stored in a
// sync.Pool without boxing a fresh interface value on every Put.
type Buf struct {
	Bytes []byte
}

// Pool hands out fixed-length byte slices backed by a sync.Pool.
type Pool struct {
	length  int        // length of each buffer handed out
	rawPool *sync.Pool // underlying pool of *Buf
}

// NewPool creates a pool whose buffers are 'length' bytes long.
// Non-positive lengths fall back to 1024: the previous guard only
// caught negative values, so NewPool(0) silently produced useless
// zero-length buffers.
func NewPool(length int) *Pool {
	if length <= 0 {
		length = 1024
	}
	return &Pool{
		length: length,
		rawPool: &sync.Pool{
			New: func() any {
				return &Buf{
					Bytes: make([]byte, length),
				}
			},
		},
	}
}

// Get returns a buffer of Length() bytes, reusing a previously Put
// buffer when one is available.
func (this *Pool) Get() *Buf {
	return this.rawPool.Get().(*Buf)
}

// Put returns a used buffer to the pool for reuse.
func (this *Pool) Put(ptr *Buf) {
	this.rawPool.Put(ptr)
}

// Length reports the size, in bytes, of each buffer in this pool.
func (this *Pool) Length() int {
	return this.length
}
|
||||
272
EdgeNode/internal/utils/bytepool/byte_pool_test.go
Normal file
272
EdgeNode/internal/utils/bytepool/byte_pool_test.go
Normal file
@@ -0,0 +1,272 @@
|
||||
package bytepool_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/bytepool"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBytePool_Memory puts 20480 fresh 32KiB buffers into a pool and logs
// the heap growth after a forced GC; it is an observational test with no
// assertions.
func TestBytePool_Memory(t *testing.T) {
	var stat1 = &runtime.MemStats{}
	runtime.ReadMemStats(stat1)

	var pool = bytepool.NewPool(32 * 1024)
	for i := 0; i < 20480; i++ {
		pool.Put(&bytepool.Buf{
			Bytes: make([]byte, 32*1024),
		})
	}

	//pool.Purge()

	//time.Sleep(60 * time.Second)

	runtime.GC()

	var stat2 = &runtime.MemStats{}
	runtime.ReadMemStats(stat2)
	t.Log((stat2.HeapInuse-stat1.HeapInuse)/1024/1024, "MB,")
}

// BenchmarkBytePool_Get measures serial Get/Put round trips.
func BenchmarkBytePool_Get(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var pool = bytepool.NewPool(1)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		var buf = pool.Get()
		_ = buf
		pool.Put(buf)
	}
}

// BenchmarkBytePool_Get_Parallel measures parallel Get/Put round trips.
func BenchmarkBytePool_Get_Parallel(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var pool = bytepool.NewPool(1024)
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get()
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Get_Sync is the baseline: a raw sync.Pool holding
// bare []byte values (each Put boxes the slice into an interface).
func BenchmarkBytePool_Get_Sync(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var pool = &sync.Pool{
		New: func() any {
			return make([]byte, 1024)
		},
	}

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get()
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Get_Sync2 is a second baseline: a raw sync.Pool
// holding *Buf wrappers, for comparison against Get_Sync.
func BenchmarkBytePool_Get_Sync2(b *testing.B) {
	runtime.GOMAXPROCS(1)

	var pool = &sync.Pool{
		New: func() any {
			return &bytepool.Buf{
				Bytes: make([]byte, 1024),
			}
		},
	}

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get()
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Bytes_4 copies 4KiB into pooled bare []byte.
func BenchmarkBytePool_Copy_Bytes_4(b *testing.B) {
	const size = 4 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return make([]byte, size)
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().([]byte)
			copy(buf, data)
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Wrapper_4 copies 4KiB into pooled *Buf wrappers.
func BenchmarkBytePool_Copy_Wrapper_4(b *testing.B) {
	const size = 4 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return &bytepool.Buf{
				Bytes: make([]byte, size),
			}
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().(*bytepool.Buf)
			copy(buf.Bytes, data)
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Bytes_16 copies 16KiB into pooled bare []byte.
func BenchmarkBytePool_Copy_Bytes_16(b *testing.B) {
	const size = 16 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return make([]byte, size)
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().([]byte)
			copy(buf, data)
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Wrapper_16 copies 16KiB into pooled *Buf wrappers.
func BenchmarkBytePool_Copy_Wrapper_16(b *testing.B) {
	const size = 16 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return &bytepool.Buf{
				Bytes: make([]byte, size),
			}
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().(*bytepool.Buf)
			copy(buf.Bytes, data)
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Wrapper_Buf_16 is the same as Copy_Wrapper_16
// but reads the slice out of the wrapper into a local first.
func BenchmarkBytePool_Copy_Wrapper_Buf_16(b *testing.B) {
	const size = 16 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return &bytepool.Buf{
				Bytes: make([]byte, size),
			}
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var bytesPtr = pool.Get().(*bytepool.Buf)
			var buf = bytesPtr.Bytes
			copy(buf, data)
			pool.Put(bytesPtr)
		}
	})
}

// BenchmarkBytePool_Copy_Wrapper_BytePool_16 exercises the shared
// package-level Pool16k instead of a locally constructed pool.
func BenchmarkBytePool_Copy_Wrapper_BytePool_16(b *testing.B) {
	const size = 16 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var bytesPtr = bytepool.Pool16k.Get()
			copy(bytesPtr.Bytes, data)
			bytepool.Pool16k.Put(bytesPtr)
		}
	})
}

// BenchmarkBytePool_Copy_Bytes_32 copies 32KiB into pooled bare []byte.
func BenchmarkBytePool_Copy_Bytes_32(b *testing.B) {
	const size = 32 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return make([]byte, size)
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().([]byte)
			copy(buf, data)
			pool.Put(buf)
		}
	})
}

// BenchmarkBytePool_Copy_Wrapper_32 copies 32KiB into pooled *Buf wrappers.
func BenchmarkBytePool_Copy_Wrapper_32(b *testing.B) {
	const size = 32 << 10

	var data = bytes.Repeat([]byte{'A'}, size)

	var pool = &sync.Pool{
		New: func() any {
			return &bytepool.Buf{
				Bytes: make([]byte, size),
			}
		},
	}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var buf = pool.Get().(*bytepool.Buf)
			copy(buf.Bytes, data)
			pool.Put(buf)
		}
	})
}
|
||||
163
EdgeNode/internal/utils/cachehits/stat.go
Normal file
163
EdgeNode/internal/utils/cachehits/stat.go
Normal file
@@ -0,0 +1,163 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package cachehits
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/idles"
|
||||
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// countSamples is the number of cache writes collected for a category
// before IsGood makes its one-time good/bad verdict.
const countSamples = 100_000

// Item accumulates cache statistics for a single category.
type Item struct {
	countHits   uint64 // cache reads (hits), updated with atomic adds
	countCached uint64 // cache writes, updated with atomic adds; starts at 1
	timestamp   int64  // unix time the item was created or last reset

	// verdict flags; once either is set the counters stop being updated
	isGood bool
	isBad  bool
}

// Stat decides, per category, whether caching is worthwhile by sampling
// the ratio of cache hits to cache writes.
type Stat struct {
	goodRatio uint64 // minimum hits-per-100-writes to call a category good
	maxItems  int    // cap on tracked categories (memory-based)

	itemMap map[string]*Item // category => *Item
	mu      *sync.RWMutex

	ticker *time.Ticker // drives the periodic stale-item reset in init()
}
|
||||
|
||||
func NewStat(goodRatio uint64) *Stat {
|
||||
if goodRatio == 0 {
|
||||
goodRatio = 5
|
||||
}
|
||||
|
||||
var maxItems = memutils.SystemMemoryGB() * 10_000
|
||||
if maxItems <= 0 {
|
||||
maxItems = 100_000
|
||||
}
|
||||
|
||||
var stat = &Stat{
|
||||
goodRatio: goodRatio,
|
||||
itemMap: map[string]*Item{},
|
||||
mu: &sync.RWMutex{},
|
||||
ticker: time.NewTicker(24 * time.Hour),
|
||||
maxItems: maxItems,
|
||||
}
|
||||
|
||||
goman.New(func() {
|
||||
stat.init()
|
||||
})
|
||||
return stat
|
||||
}
|
||||
|
||||
func (this *Stat) init() {
|
||||
idles.RunTicker(this.ticker, func() {
|
||||
var currentTime = fasttime.Now().Unix()
|
||||
|
||||
this.mu.RLock()
|
||||
for _, item := range this.itemMap {
|
||||
if item.timestamp < currentTime-7*24*86400 {
|
||||
// reset
|
||||
item.countHits = 0
|
||||
item.countCached = 1
|
||||
item.timestamp = currentTime
|
||||
item.isGood = false
|
||||
item.isBad = false
|
||||
}
|
||||
}
|
||||
this.mu.RUnlock()
|
||||
})
|
||||
}
|
||||
|
||||
// IncreaseCached records one cache write for the category, creating the
// item on first sight. Counter updates on existing items use an atomic
// add under the read lock; creation takes the write lock.
func (this *Stat) IncreaseCached(category string) {
	this.mu.RLock()
	var item = this.itemMap[category]
	if item != nil {
		// once a verdict is frozen, stop counting
		if item.isGood || item.isBad {
			this.mu.RUnlock()
			return
		}

		atomic.AddUint64(&item.countCached, 1)
		this.mu.RUnlock()
		return
	}
	this.mu.RUnlock()

	this.mu.Lock()

	if len(this.itemMap) > this.maxItems {
		// remove one randomly
		// (map iteration order is random, so this evicts an arbitrary entry)
		for k := range this.itemMap {
			delete(this.itemMap, k)
			break
		}
	}

	// NOTE(review): between RUnlock and Lock another goroutine may have
	// created this category; the write below then overwrites its entry,
	// dropping a handful of counts. Presumably acceptable for sampling —
	// confirm this is intentional.
	this.itemMap[category] = &Item{
		countHits:   0,
		countCached: 1,
		timestamp:   fasttime.Now().Unix(),
	}
	this.mu.Unlock()
}
|
||||
|
||||
func (this *Stat) IncreaseHit(category string) {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
var item = this.itemMap[category]
|
||||
if item != nil {
|
||||
if item.isGood || item.isBad {
|
||||
return
|
||||
}
|
||||
|
||||
atomic.AddUint64(&item.countHits, 1)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (this *Stat) IsGood(category string) bool {
|
||||
this.mu.RLock()
|
||||
defer func() {
|
||||
this.mu.RUnlock()
|
||||
}()
|
||||
|
||||
var item = this.itemMap[category]
|
||||
if item != nil {
|
||||
if item.isBad {
|
||||
return false
|
||||
}
|
||||
if item.isGood {
|
||||
return true
|
||||
}
|
||||
|
||||
if item.countCached > countSamples && (Tea.IsTesting() || item.timestamp < fasttime.Now().Unix()-600) /** 10 minutes ago **/ {
|
||||
var isGood = item.countHits*100/item.countCached >= this.goodRatio
|
||||
if isGood {
|
||||
item.isGood = true
|
||||
} else {
|
||||
item.isBad = true
|
||||
}
|
||||
|
||||
return isGood
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (this *Stat) Len() int {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
return len(this.itemMap)
|
||||
}
|
||||
107
EdgeNode/internal/utils/cachehits/stat_test.go
Normal file
107
EdgeNode/internal/utils/cachehits/stat_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package cachehits_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/cachehits"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/rands"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestNewStat exercises the good/bad verdict logic: categories with few
// samples or a high enough hit ratio stay good; a category exceeding the
// sample threshold with a ratio below goodRatio turns bad. (While testing,
// IsGood skips the 10-minute age requirement.)
func TestNewStat(t *testing.T) {
	var a = assert.NewAssertion(t)

	{
		// below the sample threshold: stays good regardless of ratio
		var stat = cachehits.NewStat(20)
		for i := 0; i < 1000; i++ {
			stat.IncreaseCached("a")
		}

		a.IsTrue(stat.IsGood("a"))
	}

	{
		// ratio 5% with threshold 5: still below the sample count, good
		var stat = cachehits.NewStat(5)
		for i := 0; i < 10000; i++ {
			stat.IncreaseCached("a")
		}
		for i := 0; i < 500; i++ {
			stat.IncreaseHit("a")
		}

		stat.IncreaseHit("b") // empty

		a.IsTrue(stat.IsGood("a"))
		a.IsTrue(stat.IsGood("b"))
	}

	{
		var stat = cachehits.NewStat(10)
		for i := 0; i < 10000; i++ {
			stat.IncreaseCached("a")
		}
		for i := 0; i < 1000; i++ {
			stat.IncreaseHit("a")
		}

		stat.IncreaseHit("b") // empty

		a.IsTrue(stat.IsGood("a"))
		a.IsTrue(stat.IsGood("b"))
	}

	{
		// over countSamples writes with <5% hits: verdict flips to bad
		var stat = cachehits.NewStat(5)
		for i := 0; i < 100001; i++ {
			stat.IncreaseCached("a")
		}
		for i := 0; i < 4999; i++ {
			stat.IncreaseHit("a")
		}

		a.IsFalse(stat.IsGood("a"))
	}
}

// TestNewStat_Memory inserts 10M distinct categories and logs how many
// survive the maxItems eviction; observational, only run when testing a
// single package.
func TestNewStat_Memory(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var stat = cachehits.NewStat(20)
	for i := 0; i < 10_000_000; i++ {
		stat.IncreaseCached("a" + types.String(i))
	}

	time.Sleep(60 * time.Second)

	t.Log(stat.Len())
}

// BenchmarkStat measures mixed IncreaseCached/IncreaseHit/IsGood traffic
// over a pre-populated Stat under parallel load.
func BenchmarkStat(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var stat = cachehits.NewStat(5)
	for i := 0; i < 1_000_000; i++ {
		stat.IncreaseCached("a" + types.String(i))
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var key = strconv.Itoa(rands.Int(0, 100_000))
			stat.IncreaseCached(key)
			if rands.Int(0, 3) == 0 {
				stat.IncreaseHit(key)
			}
			_ = stat.IsGood(key)
		}
	})
}
|
||||
181
EdgeNode/internal/utils/clock/manager.go
Normal file
181
EdgeNode/internal/utils/clock/manager.go
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package clock
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"github.com/TeaOSLab/EdgeCommon/pkg/nodeconfigs"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
executils "github.com/TeaOSLab/EdgeNode/internal/utils/exec"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"net"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// hasSynced guards the one-shot first sync triggered by the first reload
// event.
// NOTE(review): hasSynced is read and written from event callbacks without
// synchronization — confirm events.On callbacks are dispatched serially.
var hasSynced = false
var sharedClockManager = NewClockManager()

// init wires the shared clock manager into the node lifecycle: the hourly
// sync loop starts on EventLoaded, and the first EventReload triggers an
// immediate one-off sync.
func init() {
	if !teaconst.IsMain {
		return
	}

	events.On(events.EventLoaded, func() {
		goman.New(sharedClockManager.Start)
	})
	events.On(events.EventReload, func() {
		if !hasSynced {
			hasSynced = true

			goman.New(func() {
				err := sharedClockManager.Sync()
				if err != nil {
					remotelogs.Warn("CLOCK", "sync clock failed: "+err.Error())
				}
			})
		}
	})
}
||||
|
||||
// ClockManager keeps the system clock in sync with an NTP server.
type ClockManager struct {
	lastFailAt int64 // unix time of the last reported failure, to rate-limit warnings to one per day
}

// NewClockManager creates a clock manager.
func NewClockManager() *ClockManager {
	return &ClockManager{}
}
||||
|
||||
// Start 启动
|
||||
func (this *ClockManager) Start() {
|
||||
var ticker = time.NewTicker(1 * time.Hour)
|
||||
for range ticker.C {
|
||||
err := this.Sync()
|
||||
if err != nil {
|
||||
var currentTimestamp = time.Now().Unix()
|
||||
|
||||
// 每天只提醒一次错误
|
||||
if currentTimestamp-this.lastFailAt > 86400 {
|
||||
remotelogs.Warn("CLOCK", "sync clock failed: "+err.Error())
|
||||
this.lastFailAt = currentTimestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sync synchronizes the system clock once, when enabled by the node
// configuration. Linux only. The strategy, in order:
//  1. if chrony checking is enabled and `chronyc tracking` succeeds,
//     assume chrony manages the clock and do nothing;
//  2. prefer `ntpdate <server>`;
//  3. otherwise query the NTP server directly (ReadServer) and apply the
//     time with `date --set` when the drift exceeds 1 second.
// Missing tools are treated as best-effort: the method returns nil rather
// than an error.
func (this *ClockManager) Sync() error {
	if runtime.GOOS != "linux" {
		return nil
	}

	nodeConfig, _ := nodeconfigs.SharedNodeConfig()
	if nodeConfig == nil {
		return nil
	}

	var config = nodeConfig.Clock
	if config == nil || !config.AutoSync {
		return nil
	}

	// check chrony
	if config.CheckChrony {
		chronycExe, err := executils.LookPath("chronyc")
		if err == nil && len(chronycExe) > 0 {
			var chronyCmd = executils.NewTimeoutCmd(3*time.Second, chronycExe, "tracking")
			err = chronyCmd.Run()
			if err == nil {
				// chrony is active; leave the clock to it
				return nil
			}
		}
	}

	var server = config.Server
	if len(server) == 0 {
		server = "pool.ntp.org"
	}

	ntpdate, err := executils.LookPath("ntpdate")
	if err != nil {
		// fall back to setting the clock with the date command:
		// date --set TIME
		dateExe, err := executils.LookPath("date")
		if err == nil {
			currentTime, err := this.ReadServer(server)
			if err != nil {
				return fmt.Errorf("read server failed: %w", err)
			}

			var delta = time.Now().Unix() - currentTime.Unix()
			if delta > 1 || delta < -1 { // only adjust when the drift is significant
				var err = executils.NewTimeoutCmd(3*time.Second, dateExe, "--set", timeutil.Format("Y-m-d H:i:s+P", currentTime)).
					Run()
				if err != nil {
					return err
				}
			}
		}

		return nil
	}
	if len(ntpdate) > 0 {
		return this.syncNtpdate(ntpdate, server)
	}

	return nil
}
|
||||
|
||||
func (this *ClockManager) syncNtpdate(ntpdate string, server string) error {
|
||||
var cmd = executils.NewTimeoutCmd(30*time.Second, ntpdate, server)
|
||||
cmd.WithStderr()
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: %s", err, cmd.Stderr())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadServer queries an NTP server (UDP port 123) and returns the
// server's transmit time. Adapted from:
// https://medium.com/learning-the-go-programming-language/lets-make-an-ntp-client-in-go-287c4b9a969f
// The whole exchange is bounded by a 5 second deadline.
func (this *ClockManager) ReadServer(server string) (time.Time, error) {
	conn, err := net.Dial("udp", server+":123")
	if err != nil {
		return time.Time{}, fmt.Errorf("connect to server failed: %w", err)
	}
	defer func() {
		_ = conn.Close()
	}()
	err = conn.SetDeadline(time.Now().Add(5 * time.Second))
	if err != nil {
		return time.Time{}, err
	}

	// configure request settings by specifying the first byte as
	// 00 011 011 (or 0x1B)
	// |  |   +-- client mode (3)
	// |  + ----- version (3)
	// + -------- leap year indicator, 0 no warning

	var req = &NTPPacket{Settings: 0x1B}
	// binary.Write serializes the packet in big-endian field order,
	// which matches the NTP wire format
	err = binary.Write(conn, binary.BigEndian, req)
	if err != nil {
		return time.Time{}, fmt.Errorf("write request failed: %w", err)
	}

	var resp = &NTPPacket{}
	err = binary.Read(conn, binary.BigEndian, resp)
	if err != nil {
		return time.Time{}, fmt.Errorf("write server response failed: %w", err)
	}

	// NTP timestamps count seconds since 1900-01-01; unix time starts 1970-01-01
	const ntpEpochOffset = 2208988800

	var secs = float64(resp.TxTimeSec) - ntpEpochOffset
	// the fractional field is a 32-bit binary fraction of a second;
	// scale it to nanoseconds
	var nanos = (int64(resp.TxTimeFrac) * 1e9) >> 32
	return time.Unix(int64(secs), nanos), nil
}
|
||||
17
EdgeNode/internal/utils/clock/manager_test.go
Normal file
17
EdgeNode/internal/utils/clock/manager_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package clock_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/clock"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadServer(t *testing.T) {
|
||||
if !testutils.IsSingleTesting() {
|
||||
return
|
||||
}
|
||||
|
||||
t.Log(clock.NewClockManager().ReadServer("pool.ntp.org"))
|
||||
}
|
||||
21
EdgeNode/internal/utils/clock/ntp_packet.go
Normal file
21
EdgeNode/internal/utils/clock/ntp_packet.go
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package clock
|
||||
|
||||
// NTPPacket is the 48-byte NTP wire packet (RFC 5905 header layout).
// Field order and sizes ARE the wire format — binary.Read/Write
// serialize the struct field-by-field, so nothing here may be
// reordered or resized.
type NTPPacket struct {
	Settings       uint8  // leap yr indicator, ver number, and mode
	Stratum        uint8  // stratum of local clock
	Poll           int8   // poll exponent
	Precision      int8   // precision exponent
	RootDelay      uint32 // root delay
	RootDispersion uint32 // root dispersion
	ReferenceID    uint32 // reference id
	RefTimeSec     uint32 // reference timestamp sec
	RefTimeFrac    uint32 // reference timestamp fractional
	OrigTimeSec    uint32 // origin time secs
	OrigTimeFrac   uint32 // origin time fractional
	RxTimeSec      uint32 // receive time secs
	RxTimeFrac     uint32 // receive time frac
	TxTimeSec      uint32 // transmit time secs
	TxTimeFrac     uint32 // transmit time frac
}
|
||||
41
EdgeNode/internal/utils/common_files.go
Normal file
41
EdgeNode/internal/utils/common_files.go
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// commonFileExtensionsMap is the set of static-asset file extensions
// (lowercase, dot-prefixed) recognized by IsCommonFileExtension.
// zero.Zero values make this a memory-free set.
var commonFileExtensionsMap = map[string]zero.Zero{
	".ico":   zero.New(),
	".jpg":   zero.New(),
	".jpeg":  zero.New(),
	".gif":   zero.New(),
	".png":   zero.New(),
	".webp":  zero.New(),
	".woff2": zero.New(),
	".js":    zero.New(),
	".css":   zero.New(),
	".ttf":   zero.New(),
	".otf":   zero.New(),
	".fnt":   zero.New(),
	".svg":   zero.New(),
	".map":   zero.New(),
	".avif":  zero.New(),
	".bmp":   zero.New(),
	".cur":   zero.New(),
}
|
||||
|
||||
// IsCommonFileExtension 判断是否为常用文件扩展名
|
||||
// 不区分大小写,且不限于是否加点符号(.)
|
||||
func IsCommonFileExtension(ext string) bool {
|
||||
if len(ext) == 0 {
|
||||
return false
|
||||
}
|
||||
if ext[0] != '.' {
|
||||
ext = "." + ext
|
||||
}
|
||||
_, ok := commonFileExtensionsMap[strings.ToLower(ext)]
|
||||
return ok
|
||||
}
|
||||
20
EdgeNode/internal/utils/common_files_test.go
Normal file
20
EdgeNode/internal/utils/common_files_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIsCommonFileExtension verifies case-insensitivity and the optional
// leading dot, plus a negative case for an unknown extension.
func TestIsCommonFileExtension(t *testing.T) {
	var a = assert.NewAssertion(t)

	a.IsTrue(utils.IsCommonFileExtension(".jpg"))
	a.IsTrue(utils.IsCommonFileExtension("png"))
	a.IsTrue(utils.IsCommonFileExtension("PNG"))
	a.IsTrue(utils.IsCommonFileExtension(".PNG"))
	a.IsTrue(utils.IsCommonFileExtension("Png"))
	a.IsFalse(utils.IsCommonFileExtension("zip"))
}
|
||||
72
EdgeNode/internal/utils/conns/conn_no_stat.go
Normal file
72
EdgeNode/internal/utils/conns/conn_no_stat.go
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package connutils
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Registry of connections excluded from bandwidth accounting,
// e.g. local cache purge and warm-up traffic.
// Keyed by the connection's local address string.
var noStatAddrMap = map[string]zero.Zero{} // addr => Zero
var noStatLocker = &sync.RWMutex{}
|
||||
|
||||
// IsNoStatConn 检查是否为不统计连接
|
||||
func IsNoStatConn(addr string) bool {
|
||||
noStatLocker.RLock()
|
||||
_, ok := noStatAddrMap[addr]
|
||||
noStatLocker.RUnlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// NoStatConn wraps a net.Conn whose traffic must be excluded from
// bandwidth statistics. All net.Conn methods delegate to the wrapped
// connection; the wrapper's only job is registering/unregistering the
// local address in noStatAddrMap.
type NoStatConn struct {
	rawConn net.Conn
}

// NewNoStat wraps rawConn and registers its local address so that
// IsNoStatConn returns true for it until the connection is closed.
func NewNoStat(rawConn net.Conn) net.Conn {
	noStatLocker.Lock()
	noStatAddrMap[rawConn.LocalAddr().String()] = zero.New()
	noStatLocker.Unlock()
	return &NoStatConn{rawConn: rawConn}
}

// Read delegates to the wrapped connection.
func (this *NoStatConn) Read(b []byte) (n int, err error) {
	return this.rawConn.Read(b)
}

// Write delegates to the wrapped connection.
func (this *NoStatConn) Write(b []byte) (n int, err error) {
	return this.rawConn.Write(b)
}

// Close closes the wrapped connection and removes its local address
// from the no-stat registry.
func (this *NoStatConn) Close() error {
	err := this.rawConn.Close()

	noStatLocker.Lock()
	delete(noStatAddrMap, this.rawConn.LocalAddr().String())
	noStatLocker.Unlock()

	return err
}

// LocalAddr delegates to the wrapped connection.
func (this *NoStatConn) LocalAddr() net.Addr {
	return this.rawConn.LocalAddr()
}

// RemoteAddr delegates to the wrapped connection.
func (this *NoStatConn) RemoteAddr() net.Addr {
	return this.rawConn.RemoteAddr()
}

// SetDeadline delegates to the wrapped connection.
func (this *NoStatConn) SetDeadline(t time.Time) error {
	return this.rawConn.SetDeadline(t)
}

// SetReadDeadline delegates to the wrapped connection.
func (this *NoStatConn) SetReadDeadline(t time.Time) error {
	return this.rawConn.SetReadDeadline(t)
}

// SetWriteDeadline delegates to the wrapped connection.
func (this *NoStatConn) SetWriteDeadline(t time.Time) error {
	return this.rawConn.SetWriteDeadline(t)
}
|
||||
204
EdgeNode/internal/utils/counters/counter.go
Normal file
204
EdgeNode/internal/utils/counters/counter.go
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package counters
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
|
||||
syncutils "github.com/TeaOSLab/EdgeNode/internal/utils/sync"
|
||||
"github.com/cespare/xxhash/v2"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// maxItemsPerGroup caps each shard's map size; GC trims shards that
// exceed it.
const maxItemsPerGroup = 50_000

// SharedCounter is the process-wide counter instance, with background GC.
var SharedCounter = NewCounter[uint32]().WithGC()

// SupportedUIntType constrains Counter's value type to unsigned counters.
type SupportedUIntType interface {
	uint32 | uint64
}

// Counter is a sharded, expiring counter: keys hash into one of
// countMaps shards, each guarded by its slot in a sharded RWMutex to
// reduce lock contention.
type Counter[T SupportedUIntType] struct {
	countMaps uint64               // number of shards (== len(itemMaps))
	locker    *syncutils.RWMutex   // sharded lock; index selects the shard lock
	itemMaps  []map[uint64]Item[T] // one map per shard; values are stored by value, not pointer

	gcTicker *time.Ticker // drives periodic GC when WithGC was called
	gcIndex  int          // next shard to collect (round-robin)
	gcLocker sync.Mutex   // guards gcIndex
}
||||
|
||||
// NewCounter create new counter
|
||||
func NewCounter[T SupportedUIntType]() *Counter[T] {
|
||||
var count = memutils.SystemMemoryGB() * 8
|
||||
if count < 8 {
|
||||
count = 8
|
||||
}
|
||||
|
||||
var itemMaps = []map[uint64]Item[T]{}
|
||||
for i := 0; i < count; i++ {
|
||||
itemMaps = append(itemMaps, map[uint64]Item[T]{})
|
||||
}
|
||||
|
||||
var counter = &Counter[T]{
|
||||
countMaps: uint64(count),
|
||||
locker: syncutils.NewRWMutex(count),
|
||||
itemMaps: itemMaps,
|
||||
}
|
||||
|
||||
return counter
|
||||
}
|
||||
|
||||
// WithGC starts a background goroutine that calls GC once per second
// (one shard per call, round-robin). Calling it more than once is a
// no-op. Returns the receiver for chaining.
// NOTE(review): the ticker is never stopped, so the goroutine lives for
// the process lifetime — fine for the package-level SharedCounter,
// but worth knowing for short-lived counters.
func (this *Counter[T]) WithGC() *Counter[T] {
	if this.gcTicker != nil {
		return this
	}
	this.gcTicker = time.NewTicker(1 * time.Second)
	goman.New(func() {
		for range this.gcTicker.C {
			this.GC()
		}
	})

	return this
}
|
||||
|
||||
// Increase key
|
||||
func (this *Counter[T]) Increase(key uint64, lifeSeconds int) T {
|
||||
var index = int(key % this.countMaps)
|
||||
this.locker.RLock(index)
|
||||
var item = this.itemMaps[index][key] // item MUST NOT be pointer
|
||||
this.locker.RUnlock(index)
|
||||
if !item.IsOk() {
|
||||
// no need to care about duplication
|
||||
// always insert new item even when itemMap is full
|
||||
item = NewItem[T](lifeSeconds)
|
||||
var result = item.Increase()
|
||||
this.locker.Lock(index)
|
||||
this.itemMaps[index][key] = item
|
||||
this.locker.Unlock(index)
|
||||
return result
|
||||
}
|
||||
|
||||
this.locker.Lock(index)
|
||||
var result = item.Increase()
|
||||
this.itemMaps[index][key] = item // overwrite
|
||||
this.locker.Unlock(index)
|
||||
return result
|
||||
}
|
||||
|
||||
// IncreaseKey increments the counter for a string key by hashing it
// into the uint64 key space; see Increase.
func (this *Counter[T]) IncreaseKey(key string, lifeSeconds int) T {
	return this.Increase(this.hash(key), lifeSeconds)
}
|
||||
|
||||
// Get value of key
|
||||
func (this *Counter[T]) Get(key uint64) T {
|
||||
var index = int(key % this.countMaps)
|
||||
this.locker.RLock(index)
|
||||
defer this.locker.RUnlock(index)
|
||||
var item = this.itemMaps[index][key]
|
||||
if item.IsOk() {
|
||||
return item.Sum()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// GetKey returns the value for a string key by hashing it; see Get.
func (this *Counter[T]) GetKey(key string) T {
	return this.Get(this.hash(key))
}

// Reset removes the item stored under key, if present. The existence
// check runs under the read lock so the common absent case avoids the
// write lock.
func (this *Counter[T]) Reset(key uint64) {
	var index = int(key % this.countMaps)
	this.locker.RLock(index)
	var item = this.itemMaps[index][key]
	this.locker.RUnlock(index)

	if item.IsOk() {
		this.locker.Lock(index)
		delete(this.itemMaps[index], key)
		this.locker.Unlock(index)
	}
}

// ResetKey removes the item for a string key by hashing it; see Reset.
func (this *Counter[T]) ResetKey(key string) {
	this.Reset(this.hash(key))
}
|
||||
|
||||
// TotalItems get items count
|
||||
func (this *Counter[T]) TotalItems() int {
|
||||
var total = 0
|
||||
|
||||
for i := 0; i < int(this.countMaps); i++ {
|
||||
this.locker.RLock(i)
|
||||
total += len(this.itemMaps[i])
|
||||
this.locker.RUnlock(i)
|
||||
}
|
||||
|
||||
return total
|
||||
}
|
||||
|
||||
// GC collects one shard per call, round-robin over gcIndex: expired
// items are removed, and if the shard still exceeds maxItemsPerGroup
// arbitrary items are evicted down to the cap. Scanning happens under
// the read lock; deletion under the write lock.
func (this *Counter[T]) GC() {
	// pick the next shard to collect
	this.gcLocker.Lock()
	var gcIndex = this.gcIndex

	this.gcIndex++
	if this.gcIndex >= int(this.countMaps) {
		this.gcIndex = 0
	}

	this.gcLocker.Unlock()

	var currentTime = fasttime.Now().Unix()

	// phase 1: scan for expired keys under the read lock
	this.locker.RLock(gcIndex)
	var itemMap = this.itemMaps[gcIndex]
	var expiredKeys = []uint64{}
	for key, item := range itemMap {
		if item.IsExpired(currentTime) {
			expiredKeys = append(expiredKeys, key)
		}
	}
	var tooManyItems = len(itemMap) > maxItemsPerGroup // prevent too many items
	this.locker.RUnlock(gcIndex)

	// phase 2: delete the expired keys under the write lock
	if len(expiredKeys) > 0 {
		this.locker.Lock(gcIndex)
		for _, key := range expiredKeys {
			delete(itemMap, key)
		}
		this.locker.Unlock(gcIndex)
	}

	// phase 3: if still over the cap, evict arbitrary entries
	// (map iteration order is random) down to the limit
	if tooManyItems {
		this.locker.Lock(gcIndex)
		var count = len(itemMap) - maxItemsPerGroup
		if count > 0 {
			itemMap = this.itemMaps[gcIndex]
			for key := range itemMap {
				delete(itemMap, key)
				count--
				// NOTE(review): breaking on count < 0 evicts one more
				// entry than the overshoot; harmless, but looks
				// unintentional — confirm
				if count < 0 {
					break
				}
			}
		}
		this.locker.Unlock(gcIndex)
	}
}
|
||||
|
||||
func (this *Counter[T]) CountMaps() int {
|
||||
return int(this.countMaps)
|
||||
}
|
||||
|
||||
// calculate hash of the key
|
||||
func (this *Counter[T]) hash(key string) uint64 {
|
||||
return xxhash.Sum64String(key)
|
||||
}
|
||||
197
EdgeNode/internal/utils/counters/counter_test.go
Normal file
197
EdgeNode/internal/utils/counters/counter_test.go
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package counters_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/counters"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/rands"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestCounter_Increase verifies Increase/Reset/Get with numeric keys.
func TestCounter_Increase(t *testing.T) {
	var a = assert.NewAssertion(t)

	var counter = counters.NewCounter[uint32]()
	a.IsTrue(counter.Increase(1, 10) == 1)
	a.IsTrue(counter.Increase(1, 10) == 2)
	a.IsTrue(counter.Increase(2, 10) == 1)

	counter.Reset(1)
	a.IsTrue(counter.Get(1) == 0) // changed
	a.IsTrue(counter.Get(2) == 1) // not changed
}

// TestCounter_IncreaseKey verifies the string-key wrappers.
func TestCounter_IncreaseKey(t *testing.T) {
	var a = assert.NewAssertion(t)

	var counter = counters.NewCounter[uint32]()
	a.IsTrue(counter.IncreaseKey("1", 10) == 1)
	a.IsTrue(counter.IncreaseKey("1", 10) == 2)
	a.IsTrue(counter.IncreaseKey("2", 10) == 1)

	counter.ResetKey("1")
	a.IsTrue(counter.GetKey("1") == 0) // changed
	a.IsTrue(counter.GetKey("2") == 1) // not changed
}

// TestCounter_GC manually triggers one GC round (single-test mode only).
func TestCounter_GC(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var counter = counters.NewCounter[uint32]()
	counter.Increase(1, 20)
	time.Sleep(1 * time.Second)
	counter.Increase(1, 20)
	time.Sleep(1 * time.Second)
	counter.Increase(1, 20)
	counter.GC()
	t.Log(counter.Get(1))
}

// TestCounter_GC2 fills the counter and waits until the background GC
// drains every item (single-test mode only; runs for minutes).
func TestCounter_GC2(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var counter = counters.NewCounter[uint32]().WithGC()
	for i := 0; i < 100_000; i++ {
		counter.Increase(uint64(i), rands.Int(10, 300))
	}

	var ticker = time.NewTicker(1 * time.Second)
	for range ticker.C {
		t.Log(timeutil.Format("H:i:s"), counter.TotalItems())
		if counter.TotalItems() == 0 {
			break
		}
	}
}

// TestCounterMemory reports heap growth and GC pause with 1M items.
func TestCounterMemory(t *testing.T) {
	var stat = &runtime.MemStats{}
	runtime.ReadMemStats(stat)

	var counter = counters.NewCounter[uint32]()
	for i := 0; i < 1_000_000; i++ {
		counter.Increase(uint64(i), rands.Int(10, 300))
	}

	runtime.GC()
	runtime.GC()
	debug.FreeOSMemory()

	var stat1 = &runtime.MemStats{}
	runtime.ReadMemStats(stat1)
	t.Log((stat1.HeapInuse-stat.HeapInuse)/(1<<20), "MB")

	t.Log(counter.TotalItems())

	var gcPause = func() {
		var before = time.Now()
		runtime.GC()
		var costSeconds = time.Since(before).Seconds()
		var stats = &debug.GCStats{}
		debug.ReadGCStats(stats)
		t.Log("GC pause:", stats.Pause[0].Seconds()*1000, "ms", "cost:", costSeconds*1000, "ms")
	}

	gcPause()

	_ = counter.TotalItems()
}

// BenchmarkCounter_Increase measures numeric-key increases under parallel load.
func BenchmarkCounter_Increase(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var counter = counters.NewCounter[uint32]()

	b.ReportAllocs()
	b.ResetTimer()

	var i uint64
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			counter.Increase(atomic.AddUint64(&i, 1)%1_000_000, 20)
		}
	})

	//b.Log(counter.TotalItems())
}

// BenchmarkCounter_IncreaseKey measures string-key increases with a 100ms GC loop.
func BenchmarkCounter_IncreaseKey(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var counter = counters.NewCounter[uint32]()

	go func() {
		var ticker = time.NewTicker(100 * time.Millisecond)
		for range ticker.C {
			counter.GC()
		}
	}()

	b.ResetTimer()
	b.ReportAllocs()

	var i uint64
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			counter.IncreaseKey(types.String(atomic.AddUint64(&i, 1)%1_000_000), 20)
		}
	})

	//b.Log(counter.TotalItems())
}

// BenchmarkCounter_IncreaseKey2 is the same with a much more aggressive 1ms GC loop.
func BenchmarkCounter_IncreaseKey2(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var counter = counters.NewCounter[uint32]()

	go func() {
		var ticker = time.NewTicker(1 * time.Millisecond)
		for range ticker.C {
			counter.GC()
		}
	}()

	b.ResetTimer()

	var i uint64
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			counter.IncreaseKey(types.String(atomic.AddUint64(&i, 1)%1e5), 20)
		}
	})

	//b.Log(counter.TotalItems())
}

// BenchmarkCounter_GC measures concurrent GC calls over a pre-filled counter.
func BenchmarkCounter_GC(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var counter = counters.NewCounter[uint32]()

	for i := uint64(0); i < 1e5; i++ {
		counter.IncreaseKey(types.String(i), 20)
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			counter.GC()
		}
	})

	//b.Log(counter.TotalItems())
}
|
||||
132
EdgeNode/internal/utils/counters/item.go
Normal file
132
EdgeNode/internal/utils/counters/item.go
Normal file
@@ -0,0 +1,132 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package counters
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
)
|
||||
|
||||
// spanMaxValue caps a single span's count so it cannot overflow small T.
const spanMaxValue = 10_000_000

// maxSpans is the number of time spans an item's lifetime is divided into.
const maxSpans = 10

// Item is one counter value: a ring of per-span counts plus timing info,
// so old counts age out span by span instead of all at once.
type Item[T SupportedUIntType] struct {
	spans          [maxSpans + 1]T // ring buffer of per-span counts (one spare slot)
	lastUpdateTime int64           // unix seconds of the last Increase; 0 = never used
	lifeSeconds    int64           // total lifetime of counted events
	spanSeconds    int64           // width of one span in seconds
}

// NewItem creates an item whose counts live for lifeSeconds
// (defaults to 60 when lifeSeconds <= 0).
func NewItem[T SupportedUIntType](lifeSeconds int) Item[T] {
	if lifeSeconds <= 0 {
		lifeSeconds = 60
	}
	var spanSeconds = lifeSeconds / maxSpans
	if spanSeconds < 1 {
		spanSeconds = 1
	} else if lifeSeconds > maxSpans && lifeSeconds%maxSpans != 0 {
		// round up so maxSpans spans always cover the full lifetime
		spanSeconds++
	}

	return Item[T]{
		lifeSeconds:    int64(lifeSeconds),
		spanSeconds:    int64(spanSeconds),
		lastUpdateTime: fasttime.Now().Unix(),
	}
}
|
||||
|
||||
// Increase adds 1 to the current time span and returns the sum of all spans.
// Spans that fall between the previous update and now are zeroed first, so
// stale counts age out as time advances.
func (this *Item[T]) Increase() (result T) {
	var currentTime = fasttime.Now().Unix()
	var currentSpanIndex = this.calculateSpanIndex(currentTime)

	// return quickly
	// (same second as the last update: no spans need to be aged out)
	if this.lastUpdateTime == currentTime {
		if this.spans[currentSpanIndex] < spanMaxValue {
			this.spans[currentSpanIndex]++
		}
		for _, count := range this.spans {
			result += count
		}
		return
	}

	if this.lastUpdateTime > 0 {
		if currentTime-this.lastUpdateTime > this.lifeSeconds {
			// everything is older than the lifetime: start over
			for index := range this.spans {
				this.spans[index] = 0
			}
		} else {
			var lastSpanIndex = this.calculateSpanIndex(this.lastUpdateTime)

			if lastSpanIndex != currentSpanIndex {
				var countSpans = len(this.spans)

				// reset values between LAST and CURRENT
				// (walk the ring forward, wrapping via modulo)
				for index := lastSpanIndex + 1; ; index++ {
					var realIndex = index % countSpans
					this.spans[realIndex] = 0
					if realIndex == currentSpanIndex {
						break
					}
				}
			}
		}
	}

	if this.spans[currentSpanIndex] < spanMaxValue {
		this.spans[currentSpanIndex]++
	}
	this.lastUpdateTime = currentTime

	for _, count := range this.spans {
		result += count
	}

	return
}
|
||||
|
||||
// Sum returns the total of all spans that are still within the lifetime,
// without mutating the item. Returns 0 for an unused or fully-expired item.
func (this *Item[T]) Sum() (result T) {
	if this.lastUpdateTime == 0 {
		return 0
	}

	var currentTime = fasttime.Now().Unix()
	var currentSpanIndex = this.calculateSpanIndex(currentTime)

	if currentTime-this.lastUpdateTime > this.lifeSeconds {
		return 0
	} else {
		// walk the ring from just after the current span up to the span of
		// the last update, summing only buckets that were written within
		// the lifetime window
		var lastSpanIndex = this.calculateSpanIndex(this.lastUpdateTime)
		var countSpans = len(this.spans)
		for index := currentSpanIndex + 1; ; index++ {
			var realIndex = index % countSpans
			result += this.spans[realIndex]
			if realIndex == lastSpanIndex {
				break
			}
		}
	}

	return result
}
|
||||
|
||||
func (this *Item[T]) Reset() {
|
||||
for index := range this.spans {
|
||||
this.spans[index] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// IsExpired reports whether the item's last update is older than its
// lifetime plus one span (the extra span gives a grace period).
func (this *Item[T]) IsExpired(currentTime int64) bool {
	return this.lastUpdateTime < currentTime-this.lifeSeconds-this.spanSeconds
}

// calculateSpanIndex maps a unix timestamp onto a span bucket index,
// clamped to maxSpans-1 (integer rounding can push it past the end).
func (this *Item[T]) calculateSpanIndex(timestamp int64) int {
	var index = int(timestamp % this.lifeSeconds / this.spanSeconds)
	if index > maxSpans-1 {
		return maxSpans - 1
	}
	return index
}

// IsOk reports whether the item was properly constructed (the zero value
// of Item has lifeSeconds == 0 and is treated as "missing").
func (this *Item[T]) IsOk() bool {
	return this.lifeSeconds > 0
}
|
||||
82
EdgeNode/internal/utils/counters/item_test.go
Normal file
82
EdgeNode/internal/utils/counters/item_test.go
Normal file
@@ -0,0 +1,82 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package counters_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/counters"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestItem_Increase logs sums across sleeps spanning the 10s lifetime
// (single-test mode only).
func TestItem_Increase(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var item = counters.NewItem[uint32](10)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(1 * time.Second)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(2 * time.Second)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(5 * time.Second)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(6 * time.Second)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(5 * time.Second)
	t.Log(item.Increase(), item.Sum())
	time.Sleep(11 * time.Second)
	t.Log(item.Increase(), item.Sum())
}

// TestItem_Increase2 runs 100 slow rounds and then checks Reset empties the item.
func TestItem_Increase2(t *testing.T) {
	// run only under single testing
	if !testutils.IsSingleTesting() {
		return
	}

	var a = assert.NewAssertion(t)

	var item = counters.NewItem[uint32](23)
	for i := 0; i < 100; i++ {
		t.Log("round "+types.String(i)+":", item.Increase(), item.Sum(), timeutil.Format("H:i:s"))
		time.Sleep(2 * time.Second)
	}

	item.Reset()
	a.IsTrue(item.Sum() == 0)
}

// TestItem_IsExpired observes expiration around the 10s lifetime boundary.
func TestItem_IsExpired(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var item = counters.NewItem[uint32](10)
	t.Log(item.IsExpired(time.Now().Unix()))
	time.Sleep(10 * time.Second)
	t.Log(item.IsExpired(time.Now().Unix()))
	time.Sleep(2 * time.Second)
	t.Log(item.IsExpired(time.Now().Unix()))
	time.Sleep(2 * time.Second)
	t.Log(item.IsExpired(time.Now().Unix()))
}

// BenchmarkItem_Increase measures item creation + one increase + one sum.
func BenchmarkItem_Increase(b *testing.B) {
	runtime.GOMAXPROCS(1)

	b.ReportAllocs()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var item = counters.NewItem[uint32](60)
			item.Increase()
			item.Sum()
		}
	})
}
|
||||
192
EdgeNode/internal/utils/dbs/batch.go
Normal file
192
EdgeNode/internal/utils/dbs/batch.go
Normal file
@@ -0,0 +1,192 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package dbs
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
"time"
|
||||
)
|
||||
|
||||
// batchItem is one queued statement waiting to be executed in a transaction.
type batchItem struct {
	query string
	args  []any
}

// Batch groups queued statements into transactions of up to n statements,
// executed by a single Exec() loop.
type Batch struct {
	db *DB
	n  int // statements per transaction; <= 0 means the default (see Exec)

	enableStat bool // when true, record query timings in SharedQueryStatManager

	onFail func(err error) // optional error callback; falls back to remote logging

	queue      chan *batchItem // pending statements (capacity 16)
	closeEvent chan bool       // wakes Exec on close

	// NOTE(review): isClosed is read and written from multiple goroutines
	// without synchronization — confirm this best-effort flag is acceptable.
	isClosed bool
}

// NewBatch creates a batch bound to db and registers it on the db so
// db.Close() can shut it down.
func NewBatch(db *DB, n int) *Batch {
	var batch = &Batch{
		db:         db,
		n:          n,
		queue:      make(chan *batchItem, 16),
		closeEvent: make(chan bool, 1),
	}
	db.batches = append(db.batches, batch)
	return batch
}

// EnableStat toggles query statistics for this batch.
func (this *Batch) EnableStat(b bool) {
	this.enableStat = b
}

// OnFail sets the callback invoked when a statement or commit fails.
func (this *Batch) OnFail(callback func(err error)) {
	this.onFail = callback
}

// Add queues a statement for execution; it is a no-op after the batch
// is closed. Blocks when the queue is full.
func (this *Batch) Add(query string, args ...any) {
	if this.isClosed {
		return
	}
	this.queue <- &batchItem{
		query: query,
		args:  args,
	}
}
|
||||
|
||||
func (this *Batch) Exec() {
|
||||
var n = this.n
|
||||
if n <= 0 {
|
||||
n = 4
|
||||
}
|
||||
|
||||
var ticker = time.NewTicker(100 * time.Millisecond)
|
||||
var count = 0
|
||||
var lastTx *sql.Tx
|
||||
For:
|
||||
for {
|
||||
// closed
|
||||
if this.isClosed {
|
||||
if lastTx != nil {
|
||||
_ = this.commitTx(lastTx)
|
||||
lastTx = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case item := <-this.queue:
|
||||
if lastTx == nil {
|
||||
lastTx = this.beginTx()
|
||||
if lastTx == nil {
|
||||
continue For
|
||||
}
|
||||
}
|
||||
|
||||
err := this.execItem(lastTx, item)
|
||||
if err != nil {
|
||||
if IsClosedErr(err) {
|
||||
return
|
||||
}
|
||||
this.processErr(item.query, err)
|
||||
}
|
||||
|
||||
count++
|
||||
|
||||
if count == n {
|
||||
count = 0
|
||||
err = this.commitTx(lastTx)
|
||||
lastTx = nil
|
||||
if err != nil {
|
||||
if IsClosedErr(err) {
|
||||
return
|
||||
}
|
||||
this.processErr("commit", err)
|
||||
}
|
||||
}
|
||||
case <-ticker.C:
|
||||
if lastTx == nil || count == 0 {
|
||||
continue For
|
||||
}
|
||||
count = 0
|
||||
err := this.commitTx(lastTx)
|
||||
lastTx = nil
|
||||
if err != nil {
|
||||
if IsClosedErr(err) {
|
||||
return
|
||||
}
|
||||
this.processErr("commit", err)
|
||||
}
|
||||
case <-this.closeEvent:
|
||||
// closed
|
||||
|
||||
if lastTx != nil {
|
||||
_ = this.commitTx(lastTx)
|
||||
lastTx = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// close marks the batch closed and wakes Exec via a non-blocking send.
func (this *Batch) close() {
	this.isClosed = true

	select {
	case this.closeEvent <- true:
	default:

	}
}

// beginTx starts a transaction, returning nil when the database is closing
// or Begin fails. On success the db "updating" slot stays held until
// commitTx releases it.
func (this *Batch) beginTx() *sql.Tx {
	if !this.db.BeginUpdating() {
		return nil
	}

	tx, err := this.db.Begin()
	if err != nil {
		this.processErr("begin transaction", err)
		this.db.EndUpdating()
		return nil
	}
	return tx
}

// commitTx commits tx and releases the updating slot taken by beginTx.
func (this *Batch) commitTx(tx *sql.Tx) error {
	// always commit without checking database closing status
	this.db.EndUpdating()
	return tx.Commit()
}

// execItem runs one queued statement inside tx, failing fast with
// errDBIsClosed while the database is closing.
func (this *Batch) execItem(tx *sql.Tx, item *batchItem) error {
	// check database status
	if this.db.BeginUpdating() {
		defer this.db.EndUpdating()
	} else {
		return errDBIsClosed
	}

	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(item.query).End()
	}

	_, err := tx.Exec(item.query, item.args...)
	return err
}

// processErr reports err through the onFail callback when set,
// otherwise via remote logs; nil errors are ignored.
func (this *Batch) processErr(prefix string, err error) {
	if err == nil {
		return
	}

	if this.onFail != nil {
		this.onFail(err)
	} else {
		remotelogs.Error("SQLITE_BATCH", prefix+": "+err.Error())
	}
}
|
||||
265
EdgeNode/internal/utils/dbs/db.go
Normal file
265
EdgeNode/internal/utils/dbs/db.go
Normal file
@@ -0,0 +1,265 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fs"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// SyncMode is the sqlite _sync mode used in DSNs built elsewhere.
	SyncMode = "OFF"
)

// errDBIsClosed is returned by update operations once Close() has begun.
var errDBIsClosed = errors.New("the database is closed")

// DB wraps a sqlite *sql.DB with a close-aware update counter, an optional
// file lock for writers, query statistics, and registered batches.
type DB struct {
	locker *fsutils.Locker // file lock on the database path (writers only; may be nil)
	rawDB  *sql.DB
	dsn    string

	statusLocker  sync.Mutex
	countUpdating int32 // number of in-flight update operations

	isClosing bool // set once Close() has started; new updates are rejected

	enableStat bool

	batches []*Batch // batches registered via NewBatch; closed in Close()
}

// OpenWriter opens the database and takes a file lock on its path.
func OpenWriter(dsn string) (*DB, error) {
	return open(dsn, true)
}

// OpenReader opens the database without taking a file lock.
func OpenReader(dsn string) (*DB, error) {
	return open(dsn, false)
}
|
||||
|
||||
// open opens a sqlite database from a DSN. When lock is true a file lock is
// taken on the database path (best-effort: a lock failure only logs a warning).
func open(dsn string, lock bool) (*DB, error) {
	if teaconst.IsQuiting {
		return nil, errors.New("can not open database when process is quiting")
	}

	// decode path
	// (strip the query string and the "file:" prefix to get the file path)
	var path = dsn
	var queryIndex = strings.Index(dsn, "?")
	if queryIndex >= 0 {
		path = path[:queryIndex]
	}
	path = strings.TrimSpace(strings.TrimPrefix(path, "file:"))

	// locker
	var locker *fsutils.Locker
	if lock {
		locker = fsutils.NewLocker(path)
		err := locker.Lock()
		if err != nil {
			remotelogs.Warn("DB", "lock '"+path+"' failed: "+err.Error())
			locker = nil
		}
	}

	// check if closed successfully last time, if not we recover it
	// (a leftover -wal file indicates an unclean shutdown)
	var walPath = path + "-wal"
	_, statWalErr := os.Stat(walPath)
	var shouldRecover = statWalErr == nil

	// open
	rawDB, err := sql.Open("sqlite3", dsn)
	if err != nil {
		return nil, err
	}

	if shouldRecover {
		// close and reopen — presumably to let sqlite recover the WAL;
		// TODO confirm this is effective given sql.Open is lazy
		err = rawDB.Close()
		if err != nil {
			return nil, err
		}

		// open again
		rawDB, err = sql.Open("sqlite3", dsn)
		if err != nil {
			return nil, err
		}
	}

	var db = NewDB(rawDB, dsn)
	db.locker = locker
	return db, nil
}
|
||||
|
||||
// NewDB wraps rawDB and registers quit/termination hooks so the database
// is closed automatically on process shutdown. The hook key embeds the
// DB pointer so Close() can deregister exactly this instance.
func NewDB(rawDB *sql.DB, dsn string) *DB {
	var db = &DB{
		rawDB: rawDB,
		dsn:   dsn,
	}

	events.OnKey(events.EventQuit, fmt.Sprintf("db_%p", db), func() {
		_ = db.Close()
	})
	events.OnKey(events.EventTerminated, fmt.Sprintf("db_%p", db), func() {
		_ = db.Close()
	})

	return db
}

// SetMaxOpenConns proxies to the underlying sql.DB connection pool.
func (this *DB) SetMaxOpenConns(n int) {
	this.rawDB.SetMaxOpenConns(n)
}

// EnableStat toggles query statistics for this database.
func (this *DB) EnableStat(b bool) {
	this.enableStat = b
}

// Begin starts a transaction; fails fast with errDBIsClosed while closing.
func (this *DB) Begin() (*sql.Tx, error) {
	// check database status
	if this.BeginUpdating() {
		defer this.EndUpdating()
	} else {
		return nil, errDBIsClosed
	}

	return this.rawDB.Begin()
}

// Prepare creates a wrapped prepared statement, inheriting the db's
// stat setting.
func (this *DB) Prepare(query string) (*Stmt, error) {
	stmt, err := this.rawDB.Prepare(query)
	if err != nil {
		return nil, err
	}

	var s = NewStmt(this, stmt, query)
	if this.enableStat {
		s.EnableStat()
	}
	return s, nil
}
|
||||
|
||||
// ExecContext runs an update query; fails fast with errDBIsClosed while
// the database is closing.
func (this *DB) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) {
	// check database status
	if this.BeginUpdating() {
		defer this.EndUpdating()
	} else {
		return nil, errDBIsClosed
	}

	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(query).End()
	}

	return this.rawDB.ExecContext(ctx, query, args...)
}

// Exec is like ExecContext without a context.
func (this *DB) Exec(query string, args ...any) (sql.Result, error) {
	// check database status
	if this.BeginUpdating() {
		defer this.EndUpdating()
	} else {
		return nil, errDBIsClosed
	}

	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(query).End()
	}
	return this.rawDB.Exec(query, args...)
}

// Query runs a read query; reads are not blocked by the closing state.
func (this *DB) Query(query string, args ...any) (*sql.Rows, error) {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(query).End()
	}
	return this.rawDB.Query(query, args...)
}

// QueryRow runs a single-row read query.
func (this *DB) QueryRow(query string, args ...any) *sql.Row {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(query).End()
	}
	return this.rawDB.QueryRow(query, args...)
}
|
||||
|
||||
// Close the database
//
// Shutdown sequence: mark closing (idempotent), wait up to ~5s for in-flight
// update operations to drain, stop registered batches, deregister the
// shutdown event hooks, then close the raw database; the file lock (if any)
// is released last via defer.
func (this *DB) Close() error {
	// check database status
	this.statusLocker.Lock()
	if this.isClosing {
		this.statusLocker.Unlock()
		return nil
	}
	this.isClosing = true
	this.statusLocker.Unlock()

	// waiting for updating operations to finish
	// (polls every 1ms, capped at 5,000 loops ≈ 5 seconds)
	var maxLoops = 5_000
	for {
		this.statusLocker.Lock()
		var countUpdating = this.countUpdating
		this.statusLocker.Unlock()
		if countUpdating <= 0 {
			break
		}
		time.Sleep(1 * time.Millisecond)

		maxLoops--
		if maxLoops <= 0 {
			break
		}
	}

	for _, batch := range this.batches {
		batch.close()
	}

	events.Remove(fmt.Sprintf("db_%p", this))

	defer func() {
		if this.locker != nil {
			_ = this.locker.Release()
		}
	}()

	// print log
	/**if len(this.dsn) > 0 {
		u, _ := url.Parse(this.dsn)
		if u != nil && len(u.Path) > 0 {
			remotelogs.Debug("DB", "close '"+u.Path)
		}
	}**/

	return this.rawDB.Close()
}
|
||||
|
||||
// BeginUpdating registers an in-flight update operation. It returns false
// once Close() has started, signalling the caller to abort. Each successful
// call must be paired with EndUpdating.
func (this *DB) BeginUpdating() bool {
	this.statusLocker.Lock()
	defer this.statusLocker.Unlock()

	if this.isClosing {
		return false
	}

	this.countUpdating++
	return true
}

// EndUpdating releases the slot taken by a successful BeginUpdating.
func (this *DB) EndUpdating() {
	this.statusLocker.Lock()
	this.countUpdating--
	this.statusLocker.Unlock()
}

// RawDB exposes the underlying sql.DB (no close-state protection).
func (this *DB) RawDB() *sql.DB {
	return this.rawDB
}
|
||||
18
EdgeNode/internal/utils/dbs/db_test.go
Normal file
18
EdgeNode/internal/utils/dbs/db_test.go
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package dbs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestParseDSN checks that url.Parse keeps the path portion of a sqlite DSN.
func TestParseDSN(t *testing.T) {
	var dsn = "file:/home/cache/p43/.indexes/db-3.db?cache=private&mode=ro&_journal_mode=WAL&_sync=" + dbs.SyncMode + "&_cache_size=88000"
	u, err := url.Parse(dsn)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(u.Path) // expect: :/home/cache/p43/.indexes/db-3.db
}
|
||||
24
EdgeNode/internal/utils/dbs/query_label.go
Normal file
24
EdgeNode/internal/utils/dbs/query_label.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs
|
||||
|
||||
import "time"
|
||||
|
||||
// QueryLabel measures the wall time of a single query execution.
type QueryLabel struct {
	manager *QueryStatManager
	query   string
	before  time.Time // when the query started (set at creation)
}

// NewQueryLabel starts timing one execution of query.
func NewQueryLabel(manager *QueryStatManager, query string) *QueryLabel {
	return &QueryLabel{
		manager: manager,
		query:   query,
		before:  time.Now(),
	}
}

// End stops timing and reports the elapsed seconds to the manager.
func (this *QueryLabel) End() {
	var cost = time.Since(this.before).Seconds()
	this.manager.AddCost(this.query, cost)
}
|
||||
30
EdgeNode/internal/utils/dbs/query_stat.go
Normal file
30
EdgeNode/internal/utils/dbs/query_stat.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs
|
||||
|
||||
// QueryStat aggregates timing statistics for a single SQL query string.
// It is not safe for concurrent use on its own; QueryStatManager guards it.
type QueryStat struct {
	Query   string
	CostMin float64
	CostMax float64

	CostTotal float64
	Calls     int64
}

// NewQueryStat builds an empty stat for the given query.
func NewQueryStat(query string) *QueryStat {
	var stat = &QueryStat{}
	stat.Query = query
	return stat
}

// AddCost records one execution that took cost seconds, updating the
// min/max/total/call counters.
func (this *QueryStat) AddCost(cost float64) {
	if this.CostMin == 0 || cost < this.CostMin {
		this.CostMin = cost
	}
	if this.CostMax == 0 || cost > this.CostMax {
		this.CostMax = cost
	}

	this.Calls++
	this.CostTotal += cost
}
|
||||
89
EdgeNode/internal/utils/dbs/query_stat_manager.go
Normal file
89
EdgeNode/internal/utils/dbs/query_stat_manager.go
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// init (main process only) arranges for a background goroutine that, when
// DB stats are enabled, logs the top-10 slowest queries every 5 seconds.
func init() {
	if !teaconst.IsMain {
		return
	}

	var ticker = time.NewTicker(5 * time.Second)

	events.On(events.EventLoaded, func() {
		if teaconst.EnableDBStat {
			goman.New(func() {
				for range ticker.C {
					var stats = []string{}
					for _, stat := range SharedQueryStatManager.TopN(10) {
						var avg = stat.CostTotal / float64(stat.Calls)
						var query = stat.Query
						// truncate long SQL for readable log lines
						if len(query) > 128 {
							query = query[:128]
						}
						stats = append(stats, fmt.Sprintf("%.2fms/%.2fms/%.2fms - %d - %s", stat.CostMin*1000, stat.CostMax*1000, avg*1000, stat.Calls, query))
					}
					logs.Println("\n========== DB STATS ==========\n" + strings.Join(stats, "\n") + "\n=============================")
				}
			})
		}
	})
}

// SharedQueryStatManager is the process-wide query stat collector.
var SharedQueryStatManager = NewQueryStatManager()

// QueryStatManager aggregates per-query timing stats behind a mutex.
type QueryStatManager struct {
	statsMap map[string]*QueryStat // query => *QueryStat
	locker   sync.Mutex
}

// NewQueryStatManager creates an empty manager.
func NewQueryStatManager() *QueryStatManager {
	return &QueryStatManager{
		statsMap: map[string]*QueryStat{},
	}
}

// AddQuery starts timing one execution of query; call End() on the label.
func (this *QueryStatManager) AddQuery(query string) *QueryLabel {
	return NewQueryLabel(this, query)
}

// AddCost records a finished execution of query taking cost seconds,
// creating the stat entry on first use.
func (this *QueryStatManager) AddCost(query string, cost float64) {
	this.locker.Lock()
	defer this.locker.Unlock()

	stat, ok := this.statsMap[query]
	if !ok {
		stat = NewQueryStat(query)
		this.statsMap[query] = stat
	}
	stat.AddCost(cost)
}
|
||||
|
||||
func (this *QueryStatManager) TopN(n int) []*QueryStat {
|
||||
this.locker.Lock()
|
||||
defer this.locker.Unlock()
|
||||
|
||||
var stats = []*QueryStat{}
|
||||
for _, stat := range this.statsMap {
|
||||
stats = append(stats, stat)
|
||||
}
|
||||
sort.Slice(stats, func(i, j int) bool {
|
||||
return stats[i].CostMax > stats[j].CostMax
|
||||
})
|
||||
|
||||
if len(stats) > n {
|
||||
return stats[:n]
|
||||
}
|
||||
return stats
|
||||
}
|
||||
24
EdgeNode/internal/utils/dbs/query_stat_manager_test.go
Normal file
24
EdgeNode/internal/utils/dbs/query_stat_manager_test.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestQueryStatManager exercises label timing and TopN output.
func TestQueryStatManager(t *testing.T) {
	var manager = dbs.NewQueryStatManager()
	{
		var label = manager.AddQuery("sql 1")
		time.Sleep(1 * time.Second)
		label.End()
	}
	manager.AddQuery("sql 1").End()
	manager.AddQuery("sql 2").End()
	for _, stat := range manager.TopN(10) {
		logs.PrintAsJSON(stat, t)
	}
}
|
||||
105
EdgeNode/internal/utils/dbs/stmt.go
Normal file
105
EdgeNode/internal/utils/dbs/stmt.go
Normal file
@@ -0,0 +1,105 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package dbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
|
||||
)
|
||||
|
||||
// Stmt wraps a prepared *sql.Stmt with close-state checks and optional
// query statistics.
type Stmt struct {
	db      *DB
	rawStmt *sql.Stmt
	query   string

	enableStat bool
}

// NewStmt wraps rawStmt, which was prepared from query on db.
func NewStmt(db *DB, rawStmt *sql.Stmt, query string) *Stmt {
	return &Stmt{
		db:      db,
		rawStmt: rawStmt,
		query:   query,
	}
}

// EnableStat turns on timing statistics for this statement.
func (this *Stmt) EnableStat() {
	this.enableStat = true
}

// ExecContext runs the statement; fails fast with errDBIsClosed while the
// database is closing. Writes are throttled via the global writer limiter.
func (this *Stmt) ExecContext(ctx context.Context, args ...any) (result sql.Result, err error) {
	// check database status
	if this.db.BeginUpdating() {
		defer this.db.EndUpdating()
	} else {
		return nil, errDBIsClosed
	}

	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}
	fsutils.WriterLimiter.Ack()
	result, err = this.rawStmt.ExecContext(ctx, args...)
	fsutils.WriterLimiter.Release()
	return
}

// Exec is like ExecContext without a context.
func (this *Stmt) Exec(args ...any) (result sql.Result, err error) {
	// check database status
	if this.db.BeginUpdating() {
		defer this.db.EndUpdating()
	} else {
		return nil, errDBIsClosed
	}

	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}

	fsutils.WriterLimiter.Ack()
	result, err = this.rawStmt.Exec(args...)
	fsutils.WriterLimiter.Release()
	return
}
|
||||
|
||||
// QueryContext runs a read query with optional stats (no close-state check).
func (this *Stmt) QueryContext(ctx context.Context, args ...any) (*sql.Rows, error) {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}
	return this.rawStmt.QueryContext(ctx, args...)
}

// Query runs a read query; if the returned row set is already broken
// (rows.Err() != nil) it is closed and the error returned instead.
func (this *Stmt) Query(args ...any) (*sql.Rows, error) {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}
	rows, err := this.rawStmt.Query(args...)
	if err != nil {
		return nil, err
	}
	var rowsErr = rows.Err()
	if rowsErr != nil {
		_ = rows.Close()
		return nil, rowsErr
	}
	return rows, nil
}

// QueryRowContext proxies to the prepared statement with optional stats.
func (this *Stmt) QueryRowContext(ctx context.Context, args ...any) *sql.Row {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}
	return this.rawStmt.QueryRowContext(ctx, args...)
}

// QueryRow proxies to the prepared statement with optional stats.
func (this *Stmt) QueryRow(args ...any) *sql.Row {
	if this.enableStat {
		defer SharedQueryStatManager.AddQuery(this.query).End()
	}
	return this.rawStmt.QueryRow(args...)
}

// Close releases the underlying prepared statement.
func (this *Stmt) Close() error {
	return this.rawStmt.Close()
}
|
||||
7
EdgeNode/internal/utils/dbs/utils.go
Normal file
7
EdgeNode/internal/utils/dbs/utils.go
Normal file
@@ -0,0 +1,7 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package dbs
|
||||
|
||||
func IsClosedErr(err error) bool {
|
||||
return err == errDBIsClosed
|
||||
}
|
||||
202
EdgeNode/internal/utils/encrypt.go
Normal file
202
EdgeNode/internal/utils/encrypt.go
Normal file
@@ -0,0 +1,202 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"github.com/TeaOSLab/EdgeCommon/pkg/nodeconfigs"
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"github.com/iwind/TeaGo/maps"
|
||||
"github.com/iwind/TeaGo/rands"
|
||||
stringutil "github.com/iwind/TeaGo/utils/string"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultNodeEncryptKey = rands.HexString(32)
|
||||
defaultClusterEncryptKey = rands.HexString(32)
|
||||
)
|
||||
|
||||
var encryptV2Suffix = []byte("$v2")
|
||||
|
||||
func init() {
|
||||
if !teaconst.IsMain {
|
||||
return
|
||||
}
|
||||
|
||||
events.On(events.EventReload, func() {
|
||||
nodeConfig, _ := nodeconfigs.SharedNodeConfig()
|
||||
if nodeConfig != nil {
|
||||
defaultNodeEncryptKey = stringutil.Md5(nodeConfig.NodeId + "@" + nodeConfig.Secret)
|
||||
if len(nodeConfig.ClusterSecret) == 0 {
|
||||
defaultClusterEncryptKey = defaultNodeEncryptKey
|
||||
} else {
|
||||
defaultClusterEncryptKey = stringutil.Md5(nodeConfig.ClusterSecret)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SimpleEncrypt 加密特殊信息
|
||||
func SimpleEncrypt(data []byte) []byte {
|
||||
var method = &AES256CFBMethod{}
|
||||
err := method.Init([]byte(defaultClusterEncryptKey), []byte(defaultClusterEncryptKey[:16]))
|
||||
if err != nil {
|
||||
logs.Println("[SimpleEncrypt]" + err.Error())
|
||||
return data
|
||||
}
|
||||
|
||||
dst, err := method.Encrypt(data)
|
||||
if err != nil {
|
||||
logs.Println("[SimpleEncrypt]" + err.Error())
|
||||
return data
|
||||
}
|
||||
dst = append(dst, encryptV2Suffix...)
|
||||
return dst
|
||||
}
|
||||
|
||||
// SimpleDecrypt 解密特殊信息
|
||||
func SimpleDecrypt(data []byte) []byte {
|
||||
if bytes.HasSuffix(data, encryptV2Suffix) {
|
||||
data = data[:len(data)-len(encryptV2Suffix)]
|
||||
return simpleDecrypt(data, defaultClusterEncryptKey)
|
||||
}
|
||||
|
||||
// 兼容老的Key
|
||||
return simpleDecrypt(data, defaultNodeEncryptKey)
|
||||
}
|
||||
|
||||
func simpleDecrypt(data []byte, key string) []byte {
|
||||
var method = &AES256CFBMethod{}
|
||||
err := method.Init([]byte(key), []byte(key[:16]))
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
|
||||
src, err := method.Decrypt(data)
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
func SimpleEncryptMap(m maps.Map) (base64String string, err error) {
|
||||
mJSON, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var data = SimpleEncrypt(mJSON)
|
||||
return base64.StdEncoding.EncodeToString(data), nil
|
||||
}
|
||||
|
||||
func SimpleDecryptMap(base64String string) (maps.Map, error) {
|
||||
data, err := base64.StdEncoding.DecodeString(base64String)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var mJSON = SimpleDecrypt(data)
|
||||
var result = maps.Map{}
|
||||
err = json.Unmarshal(mJSON, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func SimpleEncryptObject(ptr any) (string, error) {
|
||||
mJSON, err := json.Marshal(ptr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var data = SimpleEncrypt(mJSON)
|
||||
return base64.StdEncoding.EncodeToString(data), nil
|
||||
}
|
||||
|
||||
func SimpleDecryptObjet(base64String string, ptr any) error {
|
||||
data, err := base64.StdEncoding.DecodeString(base64String)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mJSON = SimpleDecrypt(data)
|
||||
err = json.Unmarshal(mJSON, ptr)
|
||||
return err
|
||||
}
|
||||
|
||||
// AES256CFBMethod implements AES-256 in CFB mode with a fixed IV.
type AES256CFBMethod struct {
	block cipher.Block
	iv    []byte
}

// Init normalizes key to exactly 32 bytes (AES-256) and iv to the AES
// block size — padding with spaces or truncating — then prepares the
// cipher block.
func (this *AES256CFBMethod) Init(key, iv []byte) error {
	// normalize the key length
	if n := len(key); n > 32 {
		key = key[:32]
	} else if n < 32 {
		key = append(key, bytes.Repeat([]byte{' '}, 32-n)...)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		return err
	}
	this.block = block

	// normalize the iv length
	if n := len(iv); n > aes.BlockSize {
		iv = iv[:aes.BlockSize]
	} else if n < aes.BlockSize {
		iv = append(iv, bytes.Repeat([]byte{' '}, aes.BlockSize-n)...)
	}
	this.iv = iv

	return nil
}

// Encrypt encrypts src; empty input yields empty output. A cipher panic
// is converted into an error.
func (this *AES256CFBMethod) Encrypt(src []byte) (dst []byte, err error) {
	if len(src) == 0 {
		return
	}

	defer func() {
		if recover() != nil {
			err = errors.New("encrypt failed")
		}
	}()

	dst = make([]byte, len(src))
	cipher.NewCFBEncrypter(this.block, this.iv).XORKeyStream(dst, src)

	return
}

// Decrypt decrypts dst; empty input yields empty output. A cipher panic
// is converted into an error.
func (this *AES256CFBMethod) Decrypt(dst []byte) (src []byte, err error) {
	if len(dst) == 0 {
		return
	}

	defer func() {
		if recover() != nil {
			err = errors.New("decrypt failed")
		}
	}()

	src = make([]byte, len(dst))
	cipher.NewCFBDecrypter(this.block, this.iv).XORKeyStream(src, dst)

	return
}
|
||||
41
EdgeNode/internal/utils/encrypt/magic_key.go
Normal file
41
EdgeNode/internal/utils/encrypt/magic_key.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
)
|
||||
|
||||
const (
|
||||
MagicKey = "f1c8eafb543f03023e97b7be864a4e9b"
|
||||
)
|
||||
|
||||
// 加密特殊信息
|
||||
func MagicKeyEncode(data []byte) []byte {
|
||||
method, err := NewMethodInstance("aes-256-cfb", MagicKey, MagicKey[:16])
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
|
||||
dst, err := method.Encrypt(data)
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// 解密特殊信息
|
||||
func MagicKeyDecode(data []byte) []byte {
|
||||
method, err := NewMethodInstance("aes-256-cfb", MagicKey, MagicKey[:16])
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
|
||||
src, err := method.Decrypt(data)
|
||||
if err != nil {
|
||||
logs.Println("[MagicKeyEncode]" + err.Error())
|
||||
return data
|
||||
}
|
||||
return src
|
||||
}
|
||||
14
EdgeNode/internal/utils/encrypt/magic_key_test.go
Normal file
14
EdgeNode/internal/utils/encrypt/magic_key_test.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMagicKeyEncode(t *testing.T) {
|
||||
var dst = encrypt.MagicKeyEncode([]byte("Hello,World"))
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
var src = encrypt.MagicKeyDecode(dst)
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
12
EdgeNode/internal/utils/encrypt/method.go
Normal file
12
EdgeNode/internal/utils/encrypt/method.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package encrypt
|
||||
|
||||
// MethodInterface is the contract every encryption method implements.
type MethodInterface interface {
	// Init prepares the method with the given key and iv.
	Init(key []byte, iv []byte) error

	// Encrypt encrypts src and returns the ciphertext.
	Encrypt(src []byte) (dst []byte, err error)

	// Decrypt decrypts dst and returns the plaintext.
	Decrypt(dst []byte) (src []byte, err error)
}
|
||||
73
EdgeNode/internal/utils/encrypt/method_aes_128_cfb.go
Normal file
73
EdgeNode/internal/utils/encrypt/method_aes_128_cfb.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
type AES128CFBMethod struct {
|
||||
iv []byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
func (this *AES128CFBMethod) Init(key, iv []byte) error {
|
||||
// 判断key是否为32长度
|
||||
l := len(key)
|
||||
if l > 16 {
|
||||
key = key[:16]
|
||||
} else if l < 16 {
|
||||
key = append(key, bytes.Repeat([]byte{' '}, 16-l)...)
|
||||
}
|
||||
|
||||
// 判断iv长度
|
||||
l2 := len(iv)
|
||||
if l2 > aes.BlockSize {
|
||||
iv = iv[:aes.BlockSize]
|
||||
} else if l2 < aes.BlockSize {
|
||||
iv = append(iv, bytes.Repeat([]byte{' '}, aes.BlockSize-l2)...)
|
||||
}
|
||||
|
||||
this.iv = iv
|
||||
|
||||
// block
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.block = block
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *AES128CFBMethod) Encrypt(src []byte) (dst []byte, err error) {
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
dst = make([]byte, len(src))
|
||||
encrypter := cipher.NewCFBEncrypter(this.block, this.iv)
|
||||
encrypter.XORKeyStream(dst, src)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (this *AES128CFBMethod) Decrypt(dst []byte) (src []byte, err error) {
|
||||
if len(dst) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
src = make([]byte, len(dst))
|
||||
encrypter := cipher.NewCFBDecrypter(this.block, this.iv)
|
||||
encrypter.XORKeyStream(src, dst)
|
||||
|
||||
return
|
||||
}
|
||||
90
EdgeNode/internal/utils/encrypt/method_aes_128_cfb_test.go
Normal file
90
EdgeNode/internal/utils/encrypt/method_aes_128_cfb_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAES128CFBMethod_Encrypt(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("aes-128-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var src = []byte("Hello, World")
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
src, err = method.Decrypt(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
|
||||
func TestAES128CFBMethod_Encrypt2(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("aes-128-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var sources = [][]byte{}
|
||||
|
||||
{
|
||||
a := []byte{1}
|
||||
_, err = method.Encrypt(a)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
src := []byte(strings.Repeat("Hello", 1))
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sources = append(sources, dst)
|
||||
}
|
||||
|
||||
{
|
||||
a := []byte{1}
|
||||
_, err = method.Decrypt(a)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, dst := range sources {
|
||||
dst2 := append([]byte{}, dst...)
|
||||
src2, err := method.Decrypt(dst2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(string(src2))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAES128CFBMethod_Encrypt(b *testing.B) {
|
||||
runtime.GOMAXPROCS(1)
|
||||
|
||||
method, err := encrypt.NewMethodInstance("aes-128-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
var src = []byte(strings.Repeat("Hello", 1024))
|
||||
for i := 0; i < b.N; i++ {
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
_ = dst
|
||||
}
|
||||
}
|
||||
74
EdgeNode/internal/utils/encrypt/method_aes_192_cfb.go
Normal file
74
EdgeNode/internal/utils/encrypt/method_aes_192_cfb.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
type AES192CFBMethod struct {
|
||||
block cipher.Block
|
||||
iv []byte
|
||||
}
|
||||
|
||||
func (this *AES192CFBMethod) Init(key, iv []byte) error {
|
||||
// 判断key是否为24长度
|
||||
l := len(key)
|
||||
if l > 24 {
|
||||
key = key[:24]
|
||||
} else if l < 24 {
|
||||
key = append(key, bytes.Repeat([]byte{' '}, 24-l)...)
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.block = block
|
||||
|
||||
// 判断iv长度
|
||||
l2 := len(iv)
|
||||
if l2 > aes.BlockSize {
|
||||
iv = iv[:aes.BlockSize]
|
||||
} else if l2 < aes.BlockSize {
|
||||
iv = append(iv, bytes.Repeat([]byte{' '}, aes.BlockSize-l2)...)
|
||||
}
|
||||
|
||||
this.iv = iv
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *AES192CFBMethod) Encrypt(src []byte) (dst []byte, err error) {
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
dst = make([]byte, len(src))
|
||||
|
||||
encrypter := cipher.NewCFBEncrypter(this.block, this.iv)
|
||||
encrypter.XORKeyStream(dst, src)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (this *AES192CFBMethod) Decrypt(dst []byte) (src []byte, err error) {
|
||||
if len(dst) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
src = make([]byte, len(dst))
|
||||
|
||||
decrypter := cipher.NewCFBDecrypter(this.block, this.iv)
|
||||
decrypter.XORKeyStream(src, dst)
|
||||
|
||||
return
|
||||
}
|
||||
46
EdgeNode/internal/utils/encrypt/method_aes_192_cfb_test.go
Normal file
46
EdgeNode/internal/utils/encrypt/method_aes_192_cfb_test.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAES192CFBMethod_Encrypt(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("aes-192-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
src := []byte("Hello, World")
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
src, err = method.Decrypt(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
|
||||
func BenchmarkAES192CFBMethod_Encrypt(b *testing.B) {
|
||||
runtime.GOMAXPROCS(1)
|
||||
|
||||
method, err := encrypt.NewMethodInstance("aes-192-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
var src = []byte(strings.Repeat("Hello", 1024))
|
||||
for i := 0; i < b.N; i++ {
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
_ = dst
|
||||
}
|
||||
}
|
||||
72
EdgeNode/internal/utils/encrypt/method_aes_256_cfb.go
Normal file
72
EdgeNode/internal/utils/encrypt/method_aes_256_cfb.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
)
|
||||
|
||||
type AES256CFBMethod struct {
|
||||
block cipher.Block
|
||||
iv []byte
|
||||
}
|
||||
|
||||
func (this *AES256CFBMethod) Init(key, iv []byte) error {
|
||||
// 判断key是否为32长度
|
||||
l := len(key)
|
||||
if l > 32 {
|
||||
key = key[:32]
|
||||
} else if l < 32 {
|
||||
key = append(key, bytes.Repeat([]byte{' '}, 32-l)...)
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
this.block = block
|
||||
|
||||
// 判断iv长度
|
||||
l2 := len(iv)
|
||||
if l2 > aes.BlockSize {
|
||||
iv = iv[:aes.BlockSize]
|
||||
} else if l2 < aes.BlockSize {
|
||||
iv = append(iv, bytes.Repeat([]byte{' '}, aes.BlockSize-l2)...)
|
||||
}
|
||||
this.iv = iv
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *AES256CFBMethod) Encrypt(src []byte) (dst []byte, err error) {
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
dst = make([]byte, len(src))
|
||||
|
||||
encrypter := cipher.NewCFBEncrypter(this.block, this.iv)
|
||||
encrypter.XORKeyStream(dst, src)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (this *AES256CFBMethod) Decrypt(dst []byte) (src []byte, err error) {
|
||||
if len(dst) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = RecoverMethodPanic(recover())
|
||||
}()
|
||||
|
||||
src = make([]byte, len(dst))
|
||||
decrypter := cipher.NewCFBDecrypter(this.block, this.iv)
|
||||
decrypter.XORKeyStream(src, dst)
|
||||
|
||||
return
|
||||
}
|
||||
45
EdgeNode/internal/utils/encrypt/method_aes_256_cfb_test.go
Normal file
45
EdgeNode/internal/utils/encrypt/method_aes_256_cfb_test.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAES256CFBMethod_Encrypt(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("aes-256-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var src = []byte("Hello, World")
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
src, err = method.Decrypt(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
|
||||
func TestAES256CFBMethod_Encrypt2(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("aes-256-cfb", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var src = []byte("Hello, World")
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
src, err = method.Decrypt(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
26
EdgeNode/internal/utils/encrypt/method_raw.go
Normal file
26
EdgeNode/internal/utils/encrypt/method_raw.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package encrypt
|
||||
|
||||
// RawMethod is a pass-through "encryption" method: Encrypt and Decrypt
// simply return independent copies of their input.
type RawMethod struct {
}

// Init is a no-op; RawMethod needs neither key nor iv.
func (this *RawMethod) Init(key, iv []byte) error {
	return nil
}

// Encrypt returns an independent copy of src (empty input yields nil).
func (this *RawMethod) Encrypt(src []byte) (dst []byte, err error) {
	if len(src) == 0 {
		return
	}
	dst = append([]byte{}, src...)
	return
}

// Decrypt returns an independent copy of dst (empty input yields nil).
func (this *RawMethod) Decrypt(dst []byte) (src []byte, err error) {
	if len(dst) == 0 {
		return
	}
	src = append([]byte{}, dst...)
	return
}
|
||||
26
EdgeNode/internal/utils/encrypt/method_raw_test.go
Normal file
26
EdgeNode/internal/utils/encrypt/method_raw_test.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRawMethod_Encrypt(t *testing.T) {
|
||||
method, err := encrypt.NewMethodInstance("raw", "abc", "123")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var src = []byte("Hello, World")
|
||||
dst, err := method.Encrypt(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
t.Log("dst:", string(dst))
|
||||
|
||||
src, err = method.Decrypt(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("src:", string(src))
|
||||
}
|
||||
43
EdgeNode/internal/utils/encrypt/method_utils.go
Normal file
43
EdgeNode/internal/utils/encrypt/method_utils.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var methods = map[string]reflect.Type{
|
||||
"raw": reflect.TypeOf(new(RawMethod)).Elem(),
|
||||
"aes-128-cfb": reflect.TypeOf(new(AES128CFBMethod)).Elem(),
|
||||
"aes-192-cfb": reflect.TypeOf(new(AES192CFBMethod)).Elem(),
|
||||
"aes-256-cfb": reflect.TypeOf(new(AES256CFBMethod)).Elem(),
|
||||
}
|
||||
|
||||
func NewMethodInstance(method string, key string, iv string) (MethodInterface, error) {
|
||||
valueType, ok := methods[method]
|
||||
if !ok {
|
||||
return nil, errors.New("method '" + method + "' not found")
|
||||
}
|
||||
instance, ok := reflect.New(valueType).Interface().(MethodInterface)
|
||||
if !ok {
|
||||
return nil, errors.New("method '" + method + "' must implement MethodInterface")
|
||||
}
|
||||
err := instance.Init([]byte(key), []byte(iv))
|
||||
return instance, err
|
||||
}
|
||||
|
||||
// RecoverMethodPanic converts a recover() result into an error.
// It returns nil when no panic occurred, the panic value itself when it
// is a string or an error, and a generic error otherwise.
// (Modernized: interface{} -> any, cascaded type assertions -> type switch.)
func RecoverMethodPanic(err any) error {
	switch v := err.(type) {
	case nil:
		return nil
	case string:
		return errors.New(v)
	case error:
		return v
	default:
		return errors.New("unknown error")
	}
}
|
||||
11
EdgeNode/internal/utils/encrypt/method_utils_test.go
Normal file
11
EdgeNode/internal/utils/encrypt/method_utils_test.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package encrypt_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/encrypt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindMethodInstance(t *testing.T) {
|
||||
t.Log(encrypt.NewMethodInstance("a", "b", ""))
|
||||
t.Log(encrypt.NewMethodInstance("aes-256-cfb", "123456", ""))
|
||||
}
|
||||
80
EdgeNode/internal/utils/encrypt_test.go
Normal file
80
EdgeNode/internal/utils/encrypt_test.go
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package utils_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/maps"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSimpleEncrypt(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
var arr = []string{"Hello", "World", "People"}
|
||||
for _, s := range arr {
|
||||
var value = []byte(s)
|
||||
var encoded = utils.SimpleEncrypt(value)
|
||||
t.Log(encoded, string(encoded))
|
||||
var decoded = utils.SimpleDecrypt(encoded)
|
||||
t.Log(decoded, string(decoded))
|
||||
a.IsTrue(s == string(decoded))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleEncryptObject(t *testing.T) {
|
||||
var a = assert.NewAssertion(t)
|
||||
|
||||
type Obj struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}
|
||||
|
||||
encoded, err := utils.SimpleEncryptObject(&Obj{Name: "lily", Age: 20})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var obj = &Obj{}
|
||||
err = utils.SimpleDecryptObjet(encoded, obj)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("%#v", obj)
|
||||
a.IsTrue(obj.Name == "lily")
|
||||
a.IsTrue(obj.Age == 20)
|
||||
}
|
||||
|
||||
func TestSimpleEncrypt_Concurrent(t *testing.T) {
|
||||
var wg = sync.WaitGroup{}
|
||||
var arr = []string{"Hello", "World", "People"}
|
||||
wg.Add(len(arr))
|
||||
for _, s := range arr {
|
||||
go func(s string) {
|
||||
defer wg.Done()
|
||||
t.Log(string(utils.SimpleDecrypt(utils.SimpleEncrypt([]byte(s)))))
|
||||
}(s)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestSimpleEncryptMap(t *testing.T) {
|
||||
var m = maps.Map{
|
||||
"s": "Hello",
|
||||
"i": 20,
|
||||
"b": true,
|
||||
}
|
||||
encodedResult, err := utils.SimpleEncryptMap(m)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log("result:", encodedResult)
|
||||
|
||||
decodedResult, err := utils.SimpleDecryptMap(encodedResult)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(decodedResult)
|
||||
}
|
||||
12
EdgeNode/internal/utils/env_plus.go
Normal file
12
EdgeNode/internal/utils/env_plus.go
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
//go:build plus
|
||||
|
||||
package utils
|
||||
|
||||
import "os"
|
||||
|
||||
// IsDebugEnv reports whether the process runs in a debug environment,
// i.e. the EdgeDebug environment variable is set to "on".
func IsDebugEnv() bool {
	return os.Getenv("EdgeDebug") == "on"
}
|
||||
8
EdgeNode/internal/utils/errors.go
Normal file
8
EdgeNode/internal/utils/errors.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package utils
|
||||
|
||||
import "github.com/iwind/TeaGo/logs"
|
||||
|
||||
func PrintError(err error) {
|
||||
// TODO 记录调用的文件名、行数
|
||||
logs.Println("[ERROR]" + err.Error())
|
||||
}
|
||||
162
EdgeNode/internal/utils/exec/cmd.go
Normal file
162
EdgeNode/internal/utils/exec/cmd.go
Normal file
@@ -0,0 +1,162 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package executils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cmd is a fluent wrapper around exec.Cmd supporting an optional timeout
// and in-memory capture of stdout/stderr.
type Cmd struct {
	name string
	args []string
	env  []string
	dir  string

	ctx        context.Context
	timeout    time.Duration
	cancelFunc func()

	captureStdout bool
	captureStderr bool

	stdout *bytes.Buffer
	stderr *bytes.Buffer

	rawCmd *exec.Cmd
}

// NewCmd creates a command without a timeout.
func NewCmd(name string, args ...string) *Cmd {
	return &Cmd{
		name: name,
		args: args,
	}
}

// NewTimeoutCmd creates a command that will be killed after 'timeout'.
func NewTimeoutCmd(timeout time.Duration, name string, args ...string) *Cmd {
	return (&Cmd{
		name: name,
		args: args,
	}).WithTimeout(timeout)
}

// WithTimeout attaches a timeout context to the command.
func (this *Cmd) WithTimeout(timeout time.Duration) *Cmd {
	this.timeout = timeout

	ctx, cancelFunc := context.WithTimeout(context.Background(), timeout)
	this.ctx = ctx
	this.cancelFunc = cancelFunc

	return this
}

// WithStdout captures standard output into an in-memory buffer.
func (this *Cmd) WithStdout() *Cmd {
	this.captureStdout = true
	return this
}

// WithStderr captures standard error into an in-memory buffer.
func (this *Cmd) WithStderr() *Cmd {
	this.captureStderr = true
	return this
}

// WithEnv sets the environment for the command.
func (this *Cmd) WithEnv(env []string) *Cmd {
	this.env = env
	return this
}

// WithDir sets the working directory for the command.
func (this *Cmd) WithDir(dir string) *Cmd {
	this.dir = dir
	return this
}

// Start launches the command without waiting for it; pair with Wait.
func (this *Cmd) Start() error {
	var cmd = this.compose()
	return cmd.Start()
}

// Wait waits for a previously started command to finish.
func (this *Cmd) Wait() error {
	// release the timeout context once the process has finished
	// (fixes a context leak in the Start+Wait flow; Run already cancels)
	if this.cancelFunc != nil {
		defer this.cancelFunc()
	}

	var cmd = this.compose()
	return cmd.Wait()
}

// Run starts the command and waits for it to complete.
func (this *Cmd) Run() error {
	if this.cancelFunc != nil {
		defer this.cancelFunc()
	}

	var cmd = this.compose()
	return cmd.Run()
}

// RawStdout returns the captured stdout verbatim ("" when not captured).
func (this *Cmd) RawStdout() string {
	if this.stdout != nil {
		return this.stdout.String()
	}
	return ""
}

// Stdout returns the captured stdout with surrounding whitespace trimmed.
func (this *Cmd) Stdout() string {
	return strings.TrimSpace(this.RawStdout())
}

// RawStderr returns the captured stderr verbatim ("" when not captured).
func (this *Cmd) RawStderr() string {
	if this.stderr != nil {
		return this.stderr.String()
	}
	return ""
}

// Stderr returns the captured stderr with surrounding whitespace trimmed.
func (this *Cmd) Stderr() string {
	return strings.TrimSpace(this.RawStderr())
}

// String renders the command line for display.
func (this *Cmd) String() string {
	if this.rawCmd != nil {
		return this.rawCmd.String()
	}
	var newCmd = exec.Command(this.name, this.args...)
	return newCmd.String()
}

// Process returns the running process, or nil if not started yet.
func (this *Cmd) Process() *os.Process {
	if this.rawCmd != nil {
		return this.rawCmd.Process
	}
	return nil
}

// compose lazily builds (and caches) the underlying exec.Cmd.
func (this *Cmd) compose() *exec.Cmd {
	if this.rawCmd != nil {
		return this.rawCmd
	}

	if this.ctx != nil {
		this.rawCmd = exec.CommandContext(this.ctx, this.name, this.args...)
	} else {
		this.rawCmd = exec.Command(this.name, this.args...)
	}

	if this.env != nil {
		this.rawCmd.Env = this.env
	}

	if len(this.dir) > 0 {
		this.rawCmd.Dir = this.dir
	}

	if this.captureStdout {
		this.stdout = &bytes.Buffer{}
		this.rawCmd.Stdout = this.stdout
	}
	if this.captureStderr {
		this.stderr = &bytes.Buffer{}
		this.rawCmd.Stderr = this.stderr
	}

	return this.rawCmd
}
|
||||
61
EdgeNode/internal/utils/exec/cmd_test.go
Normal file
61
EdgeNode/internal/utils/exec/cmd_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package executils_test
|
||||
|
||||
import (
|
||||
executils "github.com/TeaOSLab/EdgeNode/internal/utils/exec"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestNewTimeoutCmd_Sleep runs a command whose runtime (3s) exceeds its
// timeout (1s); the logged error shows how the timeout is surfaced.
func TestNewTimeoutCmd_Sleep(t *testing.T) {
	var cmd = executils.NewTimeoutCmd(1*time.Second, "sleep", "3")
	cmd.WithStdout()
	cmd.WithStderr()
	err := cmd.Run()
	t.Log("error:", err)
	t.Log("stdout:", cmd.Stdout())
	t.Log("stderr:", cmd.Stderr())
}

// TestNewTimeoutCmd_Echo runs a fast command well inside its timeout and
// logs the captured stdout/stderr.
func TestNewTimeoutCmd_Echo(t *testing.T) {
	var cmd = executils.NewTimeoutCmd(10*time.Second, "echo", "-n", "hello")
	cmd.WithStdout()
	cmd.WithStderr()
	err := cmd.Run()
	t.Log("error:", err)
	t.Log("stdout:", cmd.Stdout())
	t.Log("stderr:", cmd.Stderr())
}

// TestNewTimeoutCmd_Echo2 uses NewCmd (no timeout) and also logs the raw
// (untrimmed) stdout/stderr variants.
func TestNewTimeoutCmd_Echo2(t *testing.T) {
	var cmd = executils.NewCmd("echo", "hello")
	cmd.WithStdout()
	cmd.WithStderr()
	err := cmd.Run()
	t.Log("error:", err)
	t.Log("stdout:", cmd.Stdout())
	t.Log("raw stdout:", cmd.RawStdout())
	t.Log("stderr:", cmd.Stderr())
	t.Log("raw stderr:", cmd.RawStderr())
}

// TestNewTimeoutCmd_Echo3 runs a command without enabling stdout/stderr
// capture; Stdout()/Stderr() are expected to be empty in this case.
func TestNewTimeoutCmd_Echo3(t *testing.T) {
	var cmd = executils.NewCmd("echo", "-n", "hello")
	err := cmd.Run()
	t.Log("error:", err)
	t.Log("stdout:", cmd.Stdout())
	t.Log("stderr:", cmd.Stderr())
}

// TestCmd_Process logs the underlying OS process handle after the command
// has finished.
func TestCmd_Process(t *testing.T) {
	var cmd = executils.NewCmd("echo", "-n", "hello")
	err := cmd.Run()
	t.Log("error:", err)
	t.Log(cmd.Process())
}

// TestNewTimeoutCmd_String logs the human-readable command line.
func TestNewTimeoutCmd_String(t *testing.T) {
	var cmd = executils.NewCmd("echo", "-n", "hello")
	t.Log("stdout:", cmd.String())
}
|
||||
58
EdgeNode/internal/utils/exec/look_linux.go
Normal file
58
EdgeNode/internal/utils/exec/look_linux.go
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
//go:build linux
|
||||
|
||||
package executils
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// LookPath customize our LookPath() function, to work in broken $PATH environment variable
|
||||
func LookPath(file string) (string, error) {
|
||||
result, err := exec.LookPath(file)
|
||||
if err == nil && len(result) > 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// add common dirs contains executable files these may be excluded in $PATH environment variable
|
||||
var binPaths = []string{
|
||||
"/usr/sbin",
|
||||
"/usr/bin",
|
||||
"/usr/local/sbin",
|
||||
"/usr/local/bin",
|
||||
}
|
||||
|
||||
for _, binPath := range binPaths {
|
||||
var fullPath = binPath + string(os.PathSeparator) + file
|
||||
|
||||
stat, err := os.Stat(fullPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if stat.IsDir() {
|
||||
return "", syscall.EISDIR
|
||||
}
|
||||
|
||||
var mode = stat.Mode()
|
||||
if mode.IsDir() {
|
||||
return "", syscall.EISDIR
|
||||
}
|
||||
err = syscall.Faccessat(unix.AT_FDCWD, fullPath, unix.X_OK, unix.AT_EACCESS)
|
||||
if err == nil || (err != syscall.ENOSYS && err != syscall.EPERM) {
|
||||
return fullPath, err
|
||||
}
|
||||
if mode&0111 != 0 {
|
||||
return fullPath, nil
|
||||
}
|
||||
return "", fs.ErrPermission
|
||||
}
|
||||
|
||||
return "", &exec.Error{
|
||||
Name: file,
|
||||
Err: exec.ErrNotFound,
|
||||
}
|
||||
}
|
||||
10
EdgeNode/internal/utils/exec/look_others.go
Normal file
10
EdgeNode/internal/utils/exec/look_others.go
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
//go:build !linux
|
||||
|
||||
package executils
|
||||
|
||||
import "os/exec"
|
||||
|
||||
// LookPath resolves an executable in $PATH.
// On non-linux platforms there is no custom fallback logic, so we simply
// delegate to the standard library implementation.
func LookPath(file string) (string, error) {
	path, lookErr := exec.LookPath(file)
	return path, lookErr
}
|
||||
13
EdgeNode/internal/utils/exit.go
Normal file
13
EdgeNode/internal/utils/exit.go
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/events"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Exit notifies all listeners registered for events.EventTerminated so they
// can clean up, then terminates the process with exit status 0.
// Note: os.Exit does not run deferred functions; cleanup must happen inside
// the event handlers.
func Exit() {
	events.Notify(events.EventTerminated)
	os.Exit(0)
}
|
||||
68
EdgeNode/internal/utils/expires/id_key_map.go
Normal file
68
EdgeNode/internal/utils/expires/id_key_map.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package expires
|
||||
|
||||
// IdKeyMap maintains a bidirectional mapping between int64 ids and string
// keys. Each id maps to at most one key and vice versa.
// The type is not synchronized; callers must provide their own locking when
// sharing an instance across goroutines.
type IdKeyMap struct {
	idKeys map[int64]string // id => key
	keyIds map[string]int64 // key => id
}

// NewIdKeyMap creates an empty bidirectional id/key map.
func NewIdKeyMap() *IdKeyMap {
	return &IdKeyMap{
		idKeys: make(map[int64]string),
		keyIds: make(map[string]int64),
	}
}

// Add inserts the (id, key) pair.
// Any existing pair using the same id or the same key is removed first, so
// the two maps always stay mutually consistent.
func (m *IdKeyMap) Add(id int64, key string) {
	if prevKey, found := m.idKeys[id]; found {
		delete(m.keyIds, prevKey)
	}
	if prevId, found := m.keyIds[key]; found {
		delete(m.idKeys, prevId)
	}
	m.idKeys[id] = key
	m.keyIds[key] = id
}

// Key returns the key associated with id; ok reports whether it exists.
func (m *IdKeyMap) Key(id int64) (key string, ok bool) {
	key, ok = m.idKeys[id]
	return key, ok
}

// Id returns the id associated with key; ok reports whether it exists.
func (m *IdKeyMap) Id(key string) (id int64, ok bool) {
	id, ok = m.keyIds[key]
	return id, ok
}

// DeleteId removes the pair identified by id (no-op if absent).
func (m *IdKeyMap) DeleteId(id int64) {
	if key, found := m.idKeys[id]; found {
		delete(m.keyIds, key)
	}
	delete(m.idKeys, id)
}

// DeleteKey removes the pair identified by key (no-op if absent).
func (m *IdKeyMap) DeleteKey(key string) {
	if id, found := m.keyIds[key]; found {
		delete(m.idKeys, id)
	}
	delete(m.keyIds, key)
}

// Len returns the number of stored pairs.
func (m *IdKeyMap) Len() int {
	return len(m.idKeys)
}

// IdKeys exposes the internal id=>key map (not a copy).
func (m *IdKeyMap) IdKeys() map[int64]string {
	return m.idKeys
}

// KeyIds exposes the internal key=>id map (not a copy).
func (m *IdKeyMap) KeyIds() map[string]int64 {
	return m.keyIds
}
|
||||
47
EdgeNode/internal/utils/expires/id_key_map_test.go
Normal file
47
EdgeNode/internal/utils/expires/id_key_map_test.go
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package expires_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/expires"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewIdKeyMap exercises Add's overwrite semantics (re-adding id 1 with a
// new key drops the old key) and verifies that DeleteKey removes both sides
// of the mapping.
func TestNewIdKeyMap(t *testing.T) {
	var a = assert.NewAssertion(t)

	var m = expires.NewIdKeyMap()
	m.Add(1, "1")
	m.Add(1, "2")
	m.Add(100, "100")
	logs.PrintAsJSON(m.IdKeys(), t)
	logs.PrintAsJSON(m.KeyIds(), t)

	{
		// id 1 must now map to "2" (the second Add overwrote "1")
		k, ok := m.Key(1)
		a.IsTrue(ok)
		a.IsTrue(k == "2")
	}

	{
		// id 2 was never added
		_, ok := m.Key(2)
		a.IsFalse(ok)
	}

	m.DeleteKey("2")

	{
		// deleting key "2" must also remove id 1
		_, ok := m.Key(1)
		a.IsFalse(ok)
	}

	logs.PrintAsJSON(m.IdKeys(), t)
	logs.PrintAsJSON(m.KeyIds(), t)

	m.DeleteId(100)

	logs.PrintAsJSON(m.IdKeys(), t)
	logs.PrintAsJSON(m.KeyIds(), t)
}
|
||||
176
EdgeNode/internal/utils/expires/list.go
Normal file
176
EdgeNode/internal/utils/expires/list.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package expires
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ItemMap is a set of item ids; values are zero-sized placeholders.
type ItemMap = map[uint64]zero.Zero

// List tracks an expiration timestamp per item id and supports garbage
// collection of all items that expire at a given second.
// Lists created via NewList register themselves with SharedManager, which
// drives GC once per second.
type List struct {
	expireMap map[int64]ItemMap // expires timestamp => map[id]ItemMap
	itemsMap  map[uint64]int64  // itemId => timestamp

	mu sync.RWMutex

	gcCallback      func(itemId uint64)  // invoked once per collected item
	gcBatchCallback func(itemIds ItemMap) // invoked once per collected batch

	lastTimestamp int64 // smallest expiration timestamp seen (used to short-circuit GC)
}

// NewList creates a List and registers it with SharedManager so it is
// GC'ed automatically every second.
func NewList() *List {
	var list = &List{
		expireMap: map[int64]ItemMap{},
		itemsMap:  map[uint64]int64{},
	}

	SharedManager.Add(list)

	return list
}

// NewSingletonList creates a List that is NOT registered with SharedManager;
// the caller is responsible for invoking GC itself.
func NewSingletonList() *List {
	var list = &List{
		expireMap: map[int64]ItemMap{},
		itemsMap:  map[uint64]int64{},
	}

	return list
}

// Add adds an item with its expiration timestamp.
// If the item already exists, its old expiration entry is removed first
// (i.e. the new timestamp overwrites the old one).
func (this *List) Add(itemId uint64, expiresAt int64) {
	this.mu.Lock()
	defer this.mu.Unlock()

	// track the smallest expiration timestamp seen so far
	if this.lastTimestamp == 0 || this.lastTimestamp > expiresAt {
		this.lastTimestamp = expiresAt
	}

	// does the item already exist?
	oldExpiresAt, ok := this.itemsMap[itemId]
	if ok {
		if oldExpiresAt == expiresAt {
			return
		}
		delete(this.expireMap[oldExpiresAt], itemId)
		if len(this.expireMap[oldExpiresAt]) == 0 {
			delete(this.expireMap, oldExpiresAt)
		}
	}

	expireItemMap, ok := this.expireMap[expiresAt]
	if ok {
		expireItemMap[itemId] = zero.New()
	} else {
		this.expireMap[expiresAt] = ItemMap{
			itemId: zero.New(),
		}
	}

	this.itemsMap[itemId] = expiresAt
}

// Remove deletes an item regardless of its expiration time.
func (this *List) Remove(itemId uint64) {
	this.mu.Lock()
	defer this.mu.Unlock()
	this.removeItem(itemId)
}

// ExpiresAt returns the item's expiration timestamp, or 0 if absent.
func (this *List) ExpiresAt(itemId uint64) int64 {
	this.mu.RLock()
	defer this.mu.RUnlock()
	return this.itemsMap[itemId]
}

// GC collects all items expiring exactly at the given timestamp and invokes
// the registered callbacks. It returns the collected ids (nil/empty if none).
// NOTE(review): lastTimestamp is read here without holding mu — racy with
// Add; verify whether this is acceptable as a best-effort short-circuit.
func (this *List) GC(timestamp int64) ItemMap {
	// skip early if nothing can have expired yet
	if this.lastTimestamp > timestamp+1 {
		return nil
	}
	var itemMap = this.gcItems(timestamp)
	if len(itemMap) == 0 {
		return itemMap
	}

	if this.gcCallback != nil {
		for itemId := range itemMap {
			this.gcCallback(itemId)
		}
	}
	if this.gcBatchCallback != nil {
		this.gcBatchCallback(itemMap)
	}

	return itemMap
}

// Clean drops all items without invoking any callbacks.
func (this *List) Clean() {
	this.mu.Lock()
	this.itemsMap = map[uint64]int64{}
	this.expireMap = map[int64]ItemMap{}
	this.mu.Unlock()
}

// Count returns the number of tracked items.
func (this *List) Count() int {
	this.mu.RLock()
	var count = len(this.itemsMap)
	this.mu.RUnlock()
	return count
}

// OnGC registers a per-item GC callback. Returns the list for chaining.
func (this *List) OnGC(callback func(itemId uint64)) *List {
	this.gcCallback = callback
	return this
}

// OnGCBatch registers a per-batch GC callback. Returns the list for chaining.
func (this *List) OnGCBatch(callback func(itemMap ItemMap)) *List {
	this.gcBatchCallback = callback
	return this
}

// ExpireMap exposes the internal timestamp=>ids map (not a copy; for tests).
func (this *List) ExpireMap() map[int64]ItemMap {
	return this.expireMap
}

// ItemsMap exposes the internal id=>timestamp map (not a copy; for tests).
func (this *List) ItemsMap() map[uint64]int64 {
	return this.itemsMap
}

// LastTimestamp returns the smallest expiration timestamp recorded by Add.
func (this *List) LastTimestamp() int64 {
	return this.lastTimestamp
}

// removeItem deletes an item from both maps.
// Caller must hold the write lock.
func (this *List) removeItem(itemId uint64) {
	expiresAt, ok := this.itemsMap[itemId]
	if !ok {
		return
	}
	delete(this.itemsMap, itemId)

	expireItemMap, ok := this.expireMap[expiresAt]
	if ok {
		delete(expireItemMap, itemId)
		if len(expireItemMap) == 0 {
			delete(this.expireMap, expiresAt)
		}
	}
}

// gcItems detaches and returns the bucket of items expiring at timestamp.
// NOTE(review): the bucket is read under RLock and deleted under a separate
// Lock — between the two, a concurrent GC for the same timestamp could
// observe and process the same bucket; confirm callers tolerate that.
func (this *List) gcItems(timestamp int64) ItemMap {
	this.mu.RLock()
	expireItemsMap, ok := this.expireMap[timestamp]
	this.mu.RUnlock()

	if ok {
		this.mu.Lock()
		for itemId := range expireItemsMap {
			delete(this.itemsMap, itemId)
		}
		delete(this.expireMap, timestamp)
		this.mu.Unlock()
	}

	return expireItemsMap
}
|
||||
260
EdgeNode/internal/utils/expires/list_test.go
Normal file
260
EdgeNode/internal/utils/expires/list_test.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package expires_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/expires"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
|
||||
"github.com/iwind/TeaGo/assert"
|
||||
"github.com/iwind/TeaGo/logs"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"math"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestList_Add checks that re-adding an id moves it to the new timestamp
// bucket (state is inspected by logging the internal maps).
func TestList_Add(t *testing.T) {
	var list = expires.NewList()
	list.Add(1, time.Now().Unix())
	t.Log("===BEFORE===")
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)

	list.Add(1, time.Now().Unix()+1)
	list.Add(2, time.Now().Unix()+1)
	list.Add(3, time.Now().Unix()+2)
	t.Log("===AFTER===")
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)
}

// TestList_Add_Overwrite asserts that duplicate Adds for the same id leave
// exactly one entry, keeping the most recent timestamp.
func TestList_Add_Overwrite(t *testing.T) {
	var timestamp = time.Now().Unix()

	var list = expires.NewList()
	list.Add(1, timestamp+1)
	list.Add(1, timestamp+1)
	list.Add(2, timestamp+1)
	list.Add(1, timestamp+2)
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)

	var a = assert.NewAssertion(t)
	a.IsTrue(len(list.ItemsMap()) == 2)
	a.IsTrue(len(list.ExpireMap()) == 2)
	a.IsTrue(list.ItemsMap()[1] == timestamp+2)
}

// TestList_Remove asserts that Remove clears both internal maps.
func TestList_Remove(t *testing.T) {
	var a = assert.NewAssertion(t)

	var list = expires.NewList()
	list.Add(1, time.Now().Unix()+1)
	list.Remove(1)
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)

	a.IsTrue(len(list.ExpireMap()) == 0)
	a.IsTrue(len(list.ItemsMap()) == 0)
}

// TestList_GC drives GC manually and logs which ids the per-item callback saw.
func TestList_GC(t *testing.T) {
	var unixTime = time.Now().Unix()
	t.Log("unixTime:", unixTime)

	var list = expires.NewList()
	list.Add(1, unixTime+1)
	list.Add(2, unixTime+1)
	list.Add(3, unixTime+2)
	list.OnGC(func(itemId uint64) {
		t.Log("gc:", itemId)
	})
	t.Log("last unixTime:", list.LastTimestamp())
	list.GC(time.Now().Unix() + 2)
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)

	t.Log(list.Count())
}

// TestList_GC_Batch drives GC manually and logs the batch callback payload.
func TestList_GC_Batch(t *testing.T) {
	var list = expires.NewList()
	list.Add(1, time.Now().Unix()+1)
	list.Add(2, time.Now().Unix()+1)
	list.Add(3, time.Now().Unix()+2)
	list.Add(4, time.Now().Unix()+2)
	list.OnGCBatch(func(itemMap expires.ItemMap) {
		t.Log("gc:", itemMap)
	})
	list.GC(time.Now().Unix() + 2)
	logs.PrintAsJSON(list.ExpireMap(), t)
	logs.PrintAsJSON(list.ItemsMap(), t)
}

// TestList_Start_GC observes SharedManager-driven GC over 20s of wall time;
// only runs in single-test mode (it sleeps and registers a slow callback).
func TestList_Start_GC(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var list = expires.NewList()
	list.Add(1, time.Now().Unix()+1)
	list.Add(2, time.Now().Unix()+1)
	list.Add(3, time.Now().Unix()+2)
	list.Add(3, time.Now().Unix()+10)
	list.Add(4, time.Now().Unix()+5)
	list.Add(5, time.Now().Unix()+5)
	list.Add(6, time.Now().Unix()+6)
	list.Add(7, time.Now().Unix()+6)
	list.Add(8, time.Now().Unix()+6)

	list.OnGC(func(itemId uint64) {
		t.Log("gc:", itemId, timeutil.Format("H:i:s"))
		time.Sleep(2 * time.Second)
	})

	go func() {
		expires.SharedManager.Add(list)
	}()

	time.Sleep(20 * time.Second)
	logs.PrintAsJSON(list.ItemsMap())
	logs.PrintAsJSON(list.ExpireMap())
}

// TestList_ManyItems measures GC over 1000 items collapsed into one bucket.
func TestList_ManyItems(t *testing.T) {
	var list = expires.NewList()
	for i := 0; i < 1_000; i++ {
		list.Add(uint64(i), time.Now().Unix())
	}
	for i := 0; i < 1_000; i++ {
		list.Add(uint64(i), time.Now().Unix()+1)
	}

	var now = time.Now()
	var count = 0
	list.OnGC(func(itemId uint64) {
		count++
	})
	list.GC(time.Now().Unix() + 1)
	t.Log("gc", count, "items")
	t.Log(time.Since(now))
}

// TestList_Memory loads 10M items and parks, for manual memory profiling;
// only runs in single-test mode.
func TestList_Memory(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	var list = expires.NewList()

	testutils.StartMemoryStats(t, func() {
		t.Log(list.Count(), "items")
	})

	for i := 0; i < 10_000_000; i++ {
		list.Add(uint64(i), time.Now().Unix()+1800)
	}

	time.Sleep(1 * time.Hour)
}

// TestList_Map_Performance compares delete throughput of int64 / uint64 /
// uint32 keyed maps (informs the choice of map key type in List).
func TestList_Map_Performance(t *testing.T) {
	t.Log("max uint32", math.MaxUint32)

	var timestamp = time.Now().Unix()

	{
		var m = map[int64]int64{}
		for i := 0; i < 1_000_000; i++ {
			m[int64(i)] = timestamp
		}

		var now = time.Now()
		for i := 0; i < 100_000; i++ {
			delete(m, int64(i))
		}
		t.Log(time.Since(now))
	}

	{
		var m = map[uint64]int64{}
		for i := 0; i < 1_000_000; i++ {
			m[uint64(i)] = timestamp
		}

		var now = time.Now()
		for i := 0; i < 100_000; i++ {
			delete(m, uint64(i))
		}
		t.Log(time.Since(now))
	}

	{
		var m = map[uint32]int64{}
		for i := 0; i < 1_000_000; i++ {
			m[uint32(i)] = timestamp
		}

		var now = time.Now()
		for i := 0; i < 100_000; i++ {
			delete(m, uint32(i))
		}
		t.Log(time.Since(now))
	}
}

// BenchmarkList_Add measures concurrent Add with random ids/timestamps.
func BenchmarkList_Add(b *testing.B) {
	var list = expires.NewList()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			list.Add(rand.Uint64(), fasttime.Now().Unix()+int64(rand.Int()%10_000_000))
		}
	})
}

// Benchmark_Map_Uint64 measures raw uint64-keyed map lookup throughput.
func Benchmark_Map_Uint64(b *testing.B) {
	runtime.GOMAXPROCS(1)
	var timestamp = uint64(time.Now().Unix())

	var i uint64
	var count uint64 = 1_000_000

	var m = map[uint64]uint64{}
	for i = 0; i < count; i++ {
		m[i] = timestamp
	}

	for n := 0; n < b.N; n++ {
		for i = 0; i < count; i++ {
			_ = m[i]
		}
	}
}

// BenchmarkList_GC measures concurrent GC across 1000 pre-filled lists.
func BenchmarkList_GC(b *testing.B) {
	runtime.GOMAXPROCS(4)

	var lists = []*expires.List{}

	for m := 0; m < 1_000; m++ {
		var list = expires.NewList()
		for j := 0; j < 10_000; j++ {
			list.Add(uint64(j), fasttime.Now().Unix()+int64(rand.Int()%10_000_000))
		}
		lists = append(lists, list)
	}

	var timestamp = time.Now().Unix()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for _, list := range lists {
				list.GC(timestamp + int64(rand.Int()%1_000_000))
			}
		}
	})
}
|
||||
73
EdgeNode/internal/utils/expires/manager.go
Normal file
73
EdgeNode/internal/utils/expires/manager.go
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package expires
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SharedManager is the process-wide manager that drives GC for all Lists
// created via NewList.
var SharedManager = NewManager()

// Manager periodically triggers GC on a registered set of expiration Lists,
// once per second, catching up on any seconds missed between ticks.
type Manager struct {
	listMap map[*List]zero.Zero // registered lists (set semantics)
	locker  sync.Mutex          // guards listMap
	ticker  *time.Ticker        // 1-second GC heartbeat
}

// NewManager creates a Manager and starts its GC loop in a background
// goroutine. The goroutine runs for the lifetime of the process (the ticker
// is never stopped).
func NewManager() *Manager {
	var manager = &Manager{
		listMap: map[*List]zero.Zero{},
		ticker:  time.NewTicker(1 * time.Second),
	}
	goman.New(func() {
		manager.init()
	})
	return manager
}

// init runs the GC loop: on every tick it calls GC for each second elapsed
// since the previous tick, so no expiration second is skipped even if ticks
// are delayed.
func (this *Manager) init() {
	var lastTimestamp = int64(0)
	for range this.ticker.C {
		var currentTime = time.Now().Unix()
		if lastTimestamp == 0 {
			// on the first tick, sweep the past 24h of buckets as well
			lastTimestamp = currentTime - 86400 // prevent timezone changes
		}

		if currentTime >= lastTimestamp {
			// catch up: GC every second from the last processed one to now
			for i := lastTimestamp; i <= currentTime; i++ {
				this.locker.Lock()
				for list := range this.listMap {
					list.GC(i)
				}
				this.locker.Unlock()
			}
		} else {
			// the clock moved backwards: restart from the current second
			// (this loop body executes exactly once, for i == currentTime)
			for i := currentTime; i <= currentTime; i++ {
				this.locker.Lock()
				for list := range this.listMap {
					list.GC(i)
				}
				this.locker.Unlock()
			}
		}

		// guard against sudden system clock changes
		lastTimestamp = currentTime
	}
}

// Add registers a list for periodic GC.
func (this *Manager) Add(list *List) {
	this.locker.Lock()
	this.listMap[list] = zero.New()
	this.locker.Unlock()
}

// Remove unregisters a list from periodic GC.
func (this *Manager) Remove(list *List) {
	this.locker.Lock()
	delete(this.listMap, list)
	this.locker.Unlock()
}
|
||||
99
EdgeNode/internal/utils/fasttime/time_fast.go
Normal file
99
EdgeNode/internal/utils/fasttime/time_fast.go
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package fasttime
|
||||
|
||||
import (
|
||||
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"time"
|
||||
)
|
||||
|
||||
// sharedFastTime is the cached snapshot returned by Now(); it is refreshed
// every 200ms by the goroutine started in init().
var sharedFastTime = NewFastTime()

func init() {
	if !teaconst.IsMain {
		return
	}

	// refresh the cached snapshot 5 times per second; Now() therefore has
	// up to ~200ms of staleness, which callers must tolerate
	var ticker = time.NewTicker(200 * time.Millisecond)
	goman.New(func() {
		for range ticker.C {
			sharedFastTime = NewFastTime()
		}
	})
}

// Now returns the cached time snapshot (accurate to within ~200ms).
// Reading the cache avoids a time.Now() call plus formatting on hot paths.
func Now() *FastTime {
	return sharedFastTime
}

// FastTime is an immutable snapshot of the current time with commonly used
// representations precomputed at construction.
type FastTime struct {
	rawTime             time.Time // the time.Time this snapshot was taken from
	unixTime            int64     // seconds since epoch
	unixTimeMilli       int64     // milliseconds since epoch
	unixTimeMilliString string    // unixTimeMilli preformatted as a string
	ymd                 string    // "Ymd" formatted date, e.g. 20230102
	round5Hi            string    // "Hi" of the time floored to 5 minutes
	hour                int       // hour of day, 0-23
}

// NewFastTime captures the current time and precomputes all derived fields.
func NewFastTime() *FastTime {
	var rawTime = time.Now()

	return &FastTime{
		rawTime:             rawTime,
		unixTime:            rawTime.Unix(),
		unixTimeMilli:       rawTime.UnixMilli(),
		unixTimeMilliString: types.String(rawTime.UnixMilli()),
		ymd:                 timeutil.Format("Ymd", rawTime),
		round5Hi:            timeutil.FormatTime("Hi", rawTime.Unix()/300*300),
		hour:                rawTime.Hour(),
	}
}

// Unix is the fastest way to obtain a unix timestamp; use it where ~200ms
// of staleness is acceptable.
func (this *FastTime) Unix() int64 {
	return this.unixTime
}

// UnixFloor returns the timestamp floored to a multiple of seconds.
func (this *FastTime) UnixFloor(seconds int) int64 {
	return this.unixTime / int64(seconds) * int64(seconds)
}

// UnixCell returns the timestamp floored to a multiple of seconds, plus one
// interval (i.e. the start of the next interval).
// NOTE(review): the name likely meant "UnixCeil", but it is exported API and
// cannot be renamed without breaking callers.
func (this *FastTime) UnixCell(seconds int) int64 {
	return this.unixTime/int64(seconds)*int64(seconds) + int64(seconds)
}

// UnixNextMinute returns the timestamp of the start of the next minute.
func (this *FastTime) UnixNextMinute() int64 {
	return this.UnixCell(60)
}

// UnixMilli returns the timestamp in milliseconds.
func (this *FastTime) UnixMilli() int64 {
	return this.unixTimeMilli
}

// UnixMilliString returns the millisecond timestamp together with its
// preformatted string form.
func (this *FastTime) UnixMilliString() (int64, string) {
	return this.unixTimeMilli, this.unixTimeMilliString
}

// Ymd returns the snapshot's date formatted as "Ymd".
func (this *FastTime) Ymd() string {
	return this.ymd
}

// Round5Hi returns "Hi" (hour+minute) of the snapshot floored to 5 minutes.
func (this *FastTime) Round5Hi() string {
	return this.round5Hi
}

// Format formats the snapshot with the given timeutil layout.
func (this *FastTime) Format(layout string) string {
	return timeutil.Format(layout, this.rawTime)
}

// Hour returns the snapshot's hour of day (0-23).
func (this *FastTime) Hour() int {
	return this.hour
}
|
||||
62
EdgeNode/internal/utils/fasttime/time_fast_test.go
Normal file
62
EdgeNode/internal/utils/fasttime/time_fast_test.go
Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package fasttime_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
|
||||
timeutil "github.com/iwind/TeaGo/utils/time"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestFastTime_Unix logs cached vs real timestamps over 5 seconds so the
// (bounded) staleness of the cache can be inspected.
func TestFastTime_Unix(t *testing.T) {
	for i := 0; i < 5; i++ {
		var now = fasttime.Now()
		t.Log(now.Unix(), now.UnixMilli(), "real:", time.Now().Unix())
		time.Sleep(1 * time.Second)
	}
}

// TestFastTime_UnixMilli logs the millisecond timestamp and its string form.
func TestFastTime_UnixMilli(t *testing.T) {
	t.Log(fasttime.Now().UnixMilliString())
}

// TestFastTime_UnixFloor logs the various rounding helpers side by side for
// manual inspection.
func TestFastTime_UnixFloor(t *testing.T) {
	var now = fasttime.Now()

	var timestamp = time.Now().Unix()
	t.Log("floor 60:", timestamp, now.UnixFloor(60), timeutil.FormatTime("Y-m-d H:i:s", now.UnixFloor(60)))
	t.Log("ceil 60:", timestamp, now.UnixCell(60), timeutil.FormatTime("Y-m-d H:i:s", now.UnixCell(60)))
	t.Log("floor 300:", timestamp, now.UnixFloor(300), timeutil.FormatTime("Y-m-d H:i:s", now.UnixFloor(300)))
	t.Log("next minute:", now.UnixNextMinute(), timeutil.FormatTime("Y-m-d H:i:s", now.UnixNextMinute()))
	t.Log("day:", now.Ymd())
	t.Log("round 5 minute:", now.Round5Hi())
}

// TestFastTime_Format logs a formatted snapshot.
func TestFastTime_Format(t *testing.T) {
	var now = fasttime.Now()
	t.Log(now.Format("Y-m-d H:i:s"))
}

// TestFastTime_Hour logs the snapshot's hour of day.
func TestFastTime_Hour(t *testing.T) {
	var now = fasttime.Now()
	t.Log(now.Hour())
}

// BenchmarkNewFastTime measures reading a cached snapshot field.
func BenchmarkNewFastTime(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var now = fasttime.Now()
			_ = now.Ymd()
		}
	})
}

// BenchmarkNewFastTime_Raw measures the uncached time.Now + format baseline
// for comparison with BenchmarkNewFastTime.
func BenchmarkNewFastTime_Raw(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var now = time.Now()
			_ = timeutil.Format("Ymd", now)
		}
	})
}
|
||||
30
EdgeNode/internal/utils/fnv/hash.go
Normal file
30
EdgeNode/internal/utils/fnv/hash.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package fnv
|
||||
|
||||
// FNV-1a 64-bit parameters.
const (
	offset64 uint64 = 14695981039346656037 // FNV-1a 64-bit offset basis
	prime64  uint64 = 1099511628211        // FNV-1a 64-bit prime
)

// HashString returns a non-cryptographic, non-unique FNV-1a style hash of key.
// Note: ranging over a string yields runes, so multi-byte characters are
// folded in as whole code points rather than individual bytes; this behavior
// is kept for compatibility with existing hash values.
func HashString(key string) uint64 {
	var h = offset64
	for _, r := range key {
		h ^= uint64(r)
		h *= prime64
	}
	return h
}

// Hash returns a non-cryptographic, non-unique FNV-1a hash of the key bytes.
func Hash(key []byte) uint64 {
	var h = offset64
	for i := 0; i < len(key); i++ {
		h ^= uint64(key[i])
		h *= prime64
	}
	return h
}
|
||||
32
EdgeNode/internal/utils/fnv/hash_test.go
Normal file
32
EdgeNode/internal/utils/fnv/hash_test.go
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
|
||||
|
||||
package fnv_test
|
||||
|
||||
import (
|
||||
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
|
||||
"github.com/iwind/TeaGo/types"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestHash logs hash values for a few sample keys (including the classic
// "costarring"/"liquid" FNV collision probes) for manual inspection.
func TestHash(t *testing.T) {
	for _, key := range []string{"costarring", "liquid", "hello"} {
		var h = fnv.HashString(key)
		t.Log(key + " => " + types.String(h))
	}
}

// BenchmarkHashString measures hashing a short key under parallelism.
func BenchmarkHashString(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = fnv.HashString("abcdefh")
		}
	})
}

// BenchmarkHashString_Long measures hashing a longer key under parallelism.
func BenchmarkHashString_Long(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = fnv.HashString("HELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLDHELLO,WORLD")
		}
	})
}
|
||||
126
EdgeNode/internal/utils/fs/disk.go
Normal file
126
EdgeNode/internal/utils/fs/disk.go
Normal file
@@ -0,0 +1,126 @@
|
||||
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
|
||||
|
||||
package fsutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/iwind/TeaGo/Tea"
|
||||
"math"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// diskSpeedDataFile is the JSON file (under Tea.Root/data) that caches the
// last measured disk writing speed across restarts.
const diskSpeedDataFile = "disk.speed.json"

// DiskSpeedCache is the JSON structure persisted in diskSpeedDataFile.
type DiskSpeedCache struct {
	Speed      Speed   `json:"speed"`      // classified speed level
	SpeedMB    float64 `json:"speedMB"`    // measured throughput in MB/s
	CountTests int     `json:"countTests"` // test times
}
|
||||
|
||||
// CheckDiskWritingSpeed test disk writing speed.
// It writes a 16 MB file into the system temp directory, fsyncs it, and
// measures the throughput in MB/s, rounded up to the nearest multiple of ten.
// The temporary file is always removed before returning.
func CheckDiskWritingSpeed() (speedMB float64, err error) {
	var dir = os.TempDir()
	if len(dir) == 0 {
		dir = "/tmp"
	}

	const filename = "edge-disk-writing-test.data"
	var path = dir + "/" + filename
	_ = os.Remove(path) // always try to delete the file

	fp, openErr := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666)
	if openErr != nil {
		return 0, openErr
	}

	// close (unless already closed) and remove the test file on exit
	var closed bool
	defer func() {
		if !closed {
			_ = fp.Close()
		}
		_ = os.Remove(path)
	}()

	var payload = bytes.Repeat([]byte{'A'}, 16<<20)
	var startedAt = time.Now()
	if _, err = fp.Write(payload); err != nil {
		return 0, err
	}
	// fsync so the measurement includes the actual flush to disk
	if err = fp.Sync(); err != nil {
		return 0, err
	}
	if err = fp.Close(); err != nil {
		return 0, err
	}

	var seconds = time.Since(startedAt).Seconds()
	speedMB = float64(len(payload)) / (1 << 20) / seconds
	speedMB = math.Ceil(speedMB/10) * 10 // round up to a multiple of 10

	closed = true

	return
}
|
||||
|
||||
// CheckDiskIsFast check disk is 'fast' disk to write.
// It measures the current writing speed, reports whether it exceeds the
// 150 MB/s "fast" threshold, and — only when the new measurement beats the
// previously recorded best (DiskSpeedMB) — updates the package-level
// DiskSpeed/DiskSpeedMB classification. The measurement history is persisted
// to Tea.Root/data/disk.speed.json on every call.
func CheckDiskIsFast() (speedMB float64, isFast bool, err error) {
	speedMB, err = CheckDiskWritingSpeed()
	if err != nil {
		return
	}

	// read old cached info
	var cacheFile = Tea.Root + "/data/" + diskSpeedDataFile
	var cacheInfo = &DiskSpeedCache{}
	{
		cacheData, cacheErr := os.ReadFile(cacheFile)
		if cacheErr == nil {
			var oldCacheInfo = &DiskSpeedCache{}
			cacheErr = json.Unmarshal(cacheData, oldCacheInfo)
			if cacheErr == nil {
				cacheInfo = oldCacheInfo
			}
		}
		// best-effort: a missing or corrupt cache file just starts fresh
	}

	cacheInfo.CountTests++

	defer func() {
		// write to local file (best-effort; write errors are ignored)
		cacheData, jsonErr := json.Marshal(cacheInfo)
		if jsonErr == nil {
			_ = os.WriteFile(cacheFile, cacheData, 0666)
		}
	}()

	// NOTE(review): isFast reflects the fresh measurement even when the
	// classification below is skipped by the early return — confirm intended
	isFast = speedMB > 150

	// only upgrade the recorded classification when the new measurement
	// beats the previous best
	if speedMB <= DiskSpeedMB {
		return
	}

	if speedMB > 1000 {
		DiskSpeed = SpeedExtremelyFast
	} else if speedMB > 150 {
		DiskSpeed = SpeedFast
	} else if speedMB > 60 {
		DiskSpeed = SpeedLow
	} else {
		DiskSpeed = SpeedExtremelySlow
	}

	DiskSpeedMB = speedMB

	cacheInfo.Speed = DiskSpeed
	cacheInfo.SpeedMB = DiskSpeedMB

	return
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user