Initial commit (code only without large binaries)

This commit is contained in:
robin
2026-02-15 18:58:44 +08:00
commit 35df75498f
9442 changed files with 1495866 additions and 0 deletions

16
EdgeNode/.gitignore vendored Normal file
View File

@@ -0,0 +1,16 @@
# Windows local development
*_windows.go
configs/api_node.yaml
# IDE
.idea/
.vscode/
# Build binaries
bin/
# Runtime Data
data/
configs/node.json
logs/
opt/

76
EdgeNode/.golangci.yaml Normal file
View File

@@ -0,0 +1,76 @@
# https://golangci-lint.run/usage/configuration/
linters:
enable-all: true
disable:
- ifshort
- exhaustivestruct
- golint
- nosnakecase
- scopelint
- varcheck
- structcheck
- interfacer
- maligned
- deadcode
- dogsled
- wrapcheck
- wastedassign
- varnamelen
- testpackage
- thelper
- nilerr
- sqlclosecheck
- paralleltest
- nonamedreturns
- nlreturn
- nakedret
- ireturn
- interfacebloat
- gosmopolitan
- gomnd
- goerr113
- gochecknoglobals
- exhaustruct
- errorlint
- depguard
- exhaustive
- containedctx
- wsl
- cyclop
- dupword
- errchkjson
- contextcheck
- tagalign
- dupl
- forbidigo
- funlen
- goconst
- godox
- gosec
- lll
- nestif
- revive
- unparam
- stylecheck
- gocritic
- gofumpt
- gomoddirectives
- godot
- gofmt
- gocognit
- mirror
- gocyclo
- gochecknoinits
- gci
- maintidx
- prealloc
- goimports
- errname
- musttag
- forcetypeassert
- whitespace
- noctx
- rowserrcheck
- tagliatelle
- protogetter

29
EdgeNode/LICENSE Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2020, LiuXiangChao
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

1
EdgeNode/README.md Normal file
View File

@@ -0,0 +1 @@
GoEdge边缘节点源码

3
EdgeNode/build/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
bin/*
caches
upload.sh

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build the "plus" edition for the default target (linux/amd64).
# Uncomment the lines below to also build the other supported platforms.
./build.sh linux amd64 plus
#./build.sh linux 386 plus
#./build.sh linux arm64 plus
#./build.sh linux mips64 plus
#./build.sh linux mips64le plus
#./build.sh darwin amd64 plus
#./build.sh darwin arm64 plus

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Build the community edition (no tag argument) for the default target
# (linux/amd64). Uncomment the lines below for the other platforms.
./build.sh linux amd64
#./build.sh linux 386
#./build.sh linux arm64
#./build.sh linux mips64
#./build.sh linux mips64le
#./build.sh darwin amd64
#./build.sh darwin arm64

260
EdgeNode/build/build.sh Normal file
View File

@@ -0,0 +1,260 @@
#!/usr/bin/env bash
# build compiles the edge-node binary for one OS/ARCH/TAG combination,
# stages the distribution directory (configs, www, pages, fluent-bit
# assets, optional edge-toa helper) and packs everything into a zip.
#   $1 - target GOOS  (linux / darwin)
#   $2 - target GOARCH (amd64 / 386 / arm64 / arm / mips64 / mips64le)
#   $3 - build tag, defaults to "community"; "plus" enables extra features
function build() {
	ROOT=$(dirname $0)
	NAME="edge-node"
	# version is parsed out of the Go const file (see lookup-version below)
	VERSION=$(lookup-version "$ROOT"/../internal/const/const.go)
	DIST=$ROOT/"../dist/${NAME}"
	MUSL_DIR="/usr/local/opt/musl-cross/bin"
	SRCDIR=$(realpath "$ROOT/..")
	# for macOS users: precompiled gcc can be downloaded from https://github.com/messense/homebrew-macos-cross-toolchains
	GCC_X86_64_DIR="/usr/local/gcc/x86_64-unknown-linux-gnu/bin"
	GCC_ARM64_DIR="/usr/local/gcc/aarch64-unknown-linux-gnu/bin"
	OS=${1}
	ARCH=${2}
	TAG=${3}
	# OS and ARCH are mandatory; TAG falls back to "community"
	if [ -z "$OS" ]; then
		echo "usage: build.sh OS ARCH"
		exit
	fi
	if [ -z "$ARCH" ]; then
		echo "usage: build.sh OS ARCH"
		exit
	fi
	if [ -z "$TAG" ]; then
		TAG="community"
	fi
	echo "checking ..."
	# 'zip' is required later to package the distribution
	ZIP_PATH=$(which zip)
	if [ -z "$ZIP_PATH" ]; then
		echo "we need 'zip' command to compress files"
		exit
	fi
	echo "building v${VERSION}/${OS}/${ARCH}/${TAG} ..."
	# do not include the "plus" tag in the zip file name; other tags
	# (e.g. "community") are embedded between ARCH and version
	if [ "${TAG}" = "plus" ]; then
		ZIP="${NAME}-${OS}-${ARCH}-v${VERSION}.zip"
	else
		ZIP="${NAME}-${OS}-${ARCH}-${TAG}-v${VERSION}.zip"
	fi
	echo "copying ..."
	# create the distribution skeleton on first run only
	if [ ! -d "$DIST" ]; then
		mkdir "$DIST"
		mkdir "$DIST"/bin
		mkdir "$DIST"/configs
		mkdir "$DIST"/logs
		mkdir "$DIST"/data
		if [ "$TAG" = "plus" ]; then
			mkdir "$DIST"/scripts
			mkdir "$DIST"/scripts/js
		fi
	fi
	cp "$ROOT"/configs/api_node.template.yaml "$DIST"/configs
	cp "$ROOT"/configs/cluster.template.yaml "$DIST"/configs
	cp -R "$ROOT"/www "$DIST"/
	cp -R "$ROOT"/pages "$DIST"/
	# stage fluent-bit configs/packages; abort the build if the package
	# matrix for this arch is incomplete
	copy_fluent_bit_assets "$ROOT" "$DIST" "$OS" "$ARCH" || exit 1
	# we support TOA on linux only
	if [ "$OS" == "linux" ] && [ -f "${ROOT}/edge-toa/edge-toa-${ARCH}" ]
	then
		if [ ! -d "$DIST/edge-toa" ]
		then
			mkdir "$DIST/edge-toa"
		fi
		cp "${ROOT}/edge-toa/edge-toa-${ARCH}" "$DIST/edge-toa/edge-toa"
	fi
	echo "building ..."
	CC_PATH=""
	CXX_PATH=""
	CGO_LDFLAGS=""
	CGO_CFLAGS=""
	BUILD_TAG=$TAG
	# cross-compiling from macOS to linux: pick a musl or GNU toolchain
	# per target arch; GNU toolchains additionally enable script/packet tags
	if [[ `uname -a` == *"Darwin"* && "${OS}" == "linux" ]]; then
		if [ "${ARCH}" == "amd64" ]; then
			# build with script support
			if [ -d $GCC_X86_64_DIR ]; then
				MUSL_DIR=$GCC_X86_64_DIR
				CC_PATH="x86_64-unknown-linux-gnu-gcc"
				CXX_PATH="x86_64-unknown-linux-gnu-g++"
				if [ "$TAG" = "plus" ]; then
					BUILD_TAG="plus,script,packet"
				fi
			else
				CC_PATH="x86_64-linux-musl-gcc"
				CXX_PATH="x86_64-linux-musl-g++"
			fi
		fi
		if [ "${ARCH}" == "386" ]; then
			CC_PATH="i486-linux-musl-gcc"
			CXX_PATH="i486-linux-musl-g++"
		fi
		if [ "${ARCH}" == "arm64" ]; then
			# build with script support
			if [ -d $GCC_ARM64_DIR ]; then
				MUSL_DIR=$GCC_ARM64_DIR
				CC_PATH="aarch64-unknown-linux-gnu-gcc"
				CXX_PATH="aarch64-unknown-linux-gnu-g++"
				if [ "$TAG" = "plus" ]; then
					BUILD_TAG="plus,script,packet"
				fi
			else
				CC_PATH="aarch64-linux-musl-gcc"
				CXX_PATH="aarch64-linux-musl-g++"
			fi
		fi
		if [ "${ARCH}" == "arm" ]; then
			CC_PATH="arm-linux-musleabi-gcc"
			CXX_PATH="arm-linux-musleabi-g++"
		fi
		if [ "${ARCH}" == "mips64" ]; then
			CC_PATH="mips64-linux-musl-gcc"
			CXX_PATH="mips64-linux-musl-g++"
		fi
		if [ "${ARCH}" == "mips64le" ]; then
			CC_PATH="mips64el-linux-musl-gcc"
			CXX_PATH="mips64el-linux-musl-g++"
		fi
	fi
	# libpcap
	# plus builds on linux amd64/arm64 link libpcap and brotli statically
	if [ "$OS" == "linux" ] && [[ "$ARCH" == "amd64" || "$ARCH" == "arm64" ]] && [ "$TAG" == "plus" ]; then
		CGO_LDFLAGS="-L${SRCDIR}/libs/libpcap/${ARCH} -lpcap -L${SRCDIR}/libs/libbrotli/${ARCH} -lbrotlienc -lbrotlidec -lbrotlicommon"
		CGO_CFLAGS="-I${SRCDIR}/libs/libpcap/src/libpcap -I${SRCDIR}/libs/libpcap/src/libpcap/pcap -I${SRCDIR}/libs/libbrotli/src/brotli/c/include -I${SRCDIR}/libs/libbrotli/src/brotli/c/include/brotli"
	fi
	# with a cross C compiler: fully static link; otherwise a plain cgo build
	if [ ! -z $CC_PATH ]; then
		env CC=$MUSL_DIR/$CC_PATH \
			CXX=$MUSL_DIR/$CXX_PATH GOOS="${OS}" \
			GOARCH="${ARCH}" CGO_ENABLED=1 \
			CGO_LDFLAGS="${CGO_LDFLAGS}" \
			CGO_CFLAGS="${CGO_CFLAGS}" \
			go build -trimpath -tags $BUILD_TAG -o "$DIST"/bin/${NAME} -ldflags "-linkmode external -extldflags -static -s -w" "$ROOT"/../cmd/edge-node/main.go
	else
		if [[ `uname` == *"Linux"* ]] && [ "$OS" == "linux" ] && [[ "$ARCH" == "amd64" || "$ARCH" == "arm64" ]] && [ "$TAG" == "plus" ]; then
			BUILD_TAG="plus,script,packet"
		fi
		env GOOS="${OS}" GOARCH="${ARCH}" CGO_ENABLED=1 CGO_LDFLAGS="${CGO_LDFLAGS}" CGO_CFLAGS="${CGO_CFLAGS}" go build -trimpath -tags $BUILD_TAG -o "$DIST"/bin/${NAME} -ldflags="-s -w" "$ROOT"/../cmd/edge-node/main.go
	fi
	# the go build above exits silently on failure, so check for the binary
	if [ ! -f "${DIST}/bin/${NAME}" ]; then
		echo "build failed!"
		exit
	fi
	# delete hidden files
	find "$DIST" -name ".DS_Store" -delete
	find "$DIST" -name ".gitignore" -delete
	echo "zip files"
	# pack from the parent dir so the zip contains a top-level edge-node/ folder
	cd "${DIST}/../" || exit
	if [ -f "${ZIP}" ]; then
		rm -f "${ZIP}"
	fi
	zip -r -X -q "${ZIP}" ${NAME}/
	rm -rf ${NAME}
	cd - || exit
	echo "OK"
}
# copy_fluent_bit_assets stages the fluent-bit configuration files and the
# per-arch installer packages into the distribution directory.
#   $1 - build script root, $2 - distribution dir, $3 - GOOS, $4 - GOARCH
# Returns non-zero (so the caller aborts) when the fluent-bit source tree
# or a required package directory is missing.
function copy_fluent_bit_assets() {
	ROOT=$1
	DIST=$2
	OS=$3
	ARCH=$4
	FLUENT_ROOT="$ROOT/../../deploy/fluent-bit"
	FLUENT_DIST="$DIST/deploy/fluent-bit"
	if [ ! -d "$FLUENT_ROOT" ]; then
		echo "[error] fluent-bit source directory not found: $FLUENT_ROOT"
		return 1
	fi
	# fail fast if the .deb/.rpm package matrix for this arch is incomplete
	verify_fluent_bit_package_matrix "$FLUENT_ROOT" "$ARCH" || return 1
	# always rebuild the staging dir from scratch
	rm -rf "$FLUENT_DIST"
	mkdir -p "$FLUENT_DIST"
	# copy only the known config files; missing ones are silently skipped
	for file in fluent-bit.conf fluent-bit-dns.conf fluent-bit-https.conf fluent-bit-dns-https.conf fluent-bit-windows.conf fluent-bit-windows-https.conf parsers.conf clickhouse-upstream.conf clickhouse-upstream-windows.conf README.md; do
		if [ -f "$FLUENT_ROOT/$file" ]; then
			cp "$FLUENT_ROOT/$file" "$FLUENT_DIST/"
		fi
	done
	# installer packages are shipped for linux targets only
	if [ "$OS" = "linux" ]; then
		PACKAGE_SRC="$FLUENT_ROOT/packages/linux-$ARCH"
		PACKAGE_DST="$FLUENT_DIST/packages/linux-$ARCH"
		if [ -d "$PACKAGE_SRC" ]; then
			mkdir -p "$PACKAGE_DST"
			cp -R "$PACKAGE_SRC/." "$PACKAGE_DST/"
		else
			echo "[error] fluent-bit package directory not found: $PACKAGE_SRC"
			return 1
		fi
	fi
	# strip development leftovers (git files, local log DBs, runtime storage)
	rm -f "$FLUENT_DIST/.gitignore"
	rm -f "$FLUENT_DIST"/logs.db*
	rm -rf "$FLUENT_DIST/storage"
	return 0
}
# verify_fluent_bit_package_matrix checks that every fluent-bit installer
# package (.deb and .rpm) required for the target architecture exists under
# the fluent-bit source tree.
#   $1 - fluent-bit source root, $2 - target arch (amd64 | arm64)
# Returns 0 when all required packages are present, 1 otherwise.
function verify_fluent_bit_package_matrix() {
	FLUENT_ROOT=$1
	ARCH=$2
	REQUIRED_FILES=()
	case "$ARCH" in
	amd64)
		REQUIRED_FILES=(
			"packages/linux-amd64/fluent-bit_4.2.2_amd64.deb"
			"packages/linux-amd64/fluent-bit-4.2.2-1.x86_64.rpm"
		)
		;;
	arm64)
		REQUIRED_FILES=(
			"packages/linux-arm64/fluent-bit_4.2.2_arm64.deb"
			"packages/linux-arm64/fluent-bit-4.2.2-1.aarch64.rpm"
		)
		;;
	*)
		echo "[error] unsupported arch for fluent-bit package validation: $ARCH"
		return 1
		;;
	esac
	# report every missing package before failing, not just the first one
	MISSING=0
	for FILE in "${REQUIRED_FILES[@]}"; do
		[ -f "$FLUENT_ROOT/$FILE" ] && continue
		echo "[error] fluent-bit matrix package missing: $FLUENT_ROOT/$FILE"
		MISSING=1
	done
	[ "$MISSING" -eq 0 ]
}
# lookup-version extracts the node version from a Go const file.
#   $1 - path to const.go containing a line like: Version = "x.y.z"
# On success the version string is printed to stdout (captured by the
# caller via command substitution). On failure the error goes to STDERR
# and we exit non-zero: previously the error message was echoed to stdout,
# so `VERSION=$(lookup-version ...)` captured "could not match version" as
# the version, and `exit` only terminated the $() subshell — the build then
# silently continued with a bogus version string.
function lookup-version() {
	FILE=$1
	VERSION_DATA=$(cat "$FILE")
	re="Version[ ]+=[ ]+\"([0-9.]+)\""
	if [[ $VERSION_DATA =~ $re ]]; then
		VERSION=${BASH_REMATCH[1]}
		echo "$VERSION"
	else
		# stderr keeps the caller's captured value empty; the non-zero
		# status makes the failure detectable
		echo "could not match version" >&2
		exit 1
	fi
}
build "$1" "$2" "$3"

6
EdgeNode/build/configs/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
node.json
api.yaml
api_node.yaml
cluster.yaml
api_cluster.yaml
*.cache

View File

@@ -0,0 +1,3 @@
* `api_node.template.yaml` - API相关配置模板
* `cluster.template.yaml` - 通过集群自动接入节点模板
* `cache.template.yaml` - 缓存优化配置模板(TTL 缓存和文件缓存优化参数)

View File

@@ -0,0 +1,3 @@
rpc.endpoints: [ "" ]
nodeId: ""
secret: ""

View File

@@ -0,0 +1,70 @@
# GoEdge CDN 缓存优化配置文件模板
# 复制此文件为 cache.yaml 并根据实际情况修改配置
# TTL 缓存配置
ttl_cache:
# 分片配置
# pieces: 0 表示自动计算,> 0 表示手动指定分片数
# 自动计算规则:基础分片数 = CPU核心数 * 2(根据系统内存调整)
pieces: 0
min_pieces: 64 # 最小分片数
max_pieces: 1024 # 最大分片数
# GC 配置
gc:
base_interval: 2s # 基础 GC 间隔
min_interval: 1s # 最小 GC 间隔(过期率高时使用)
max_interval: 10s # 最大 GC 间隔(过期率低时使用)
adaptive: true # 是否启用自适应 GC(根据过期率动态调整间隔)
sample_size: 100 # 过期率采样大小(用于智能分片选择)
# 文件缓存配置
file_cache:
# SSD 检测配置
auto_detect_ssd: true # 是否自动检测 SSD通过 /sys/block/*/queue/rotational 或 lsblk
ssd_paths: [] # 手动指定 SSD 路径(可选,如:["/mnt/ssd1", "/mnt/ssd2"])
# 如果 auto_detect_ssd 为 false将使用此列表
# SSD 优化策略
# 热点数据将优先存储在 SSD 上
ssd:
hot_item_threshold: 100 # 访问次数超过此值视为热点数据
write_buffer_size: 128KB # 写入缓冲区大小SSD 随机写性能好,使用更大缓冲区)
read_ahead_size: 256KB # 预读大小SSD 使用更激进的预读策略)
sync_interval: 10s # 同步间隔SSD 不需要频繁同步,减少 fsync 频率)
open_file_cache_max: 10000 # 文件句柄缓存最大值SSD 可以缓存更多文件句柄)
# HDD 配置
# 传统机械硬盘的优化配置
hdd:
write_buffer_size: 64KB # 写入缓冲区大小
read_ahead_size: 128KB # 预读大小
sync_interval: 1s # 同步间隔HDD 需要更频繁的同步)
open_file_cache_max: 5000 # 文件句柄缓存最大值
# 内存缓存配置
# 内存缓存作为文件缓存的一级缓存,用于提升小文件访问性能
memory:
# 内存缓存容量(系统内存的百分比)
# 如果未设置,将根据系统内存自动计算:
# - 系统内存 < 32GB: 10%
# - 系统内存 >= 32GB: 15%
# - 系统内存 >= 64GB: 20%
# 限制范围:最小 512MB最大 32GB
capacity_percent: 15 # 默认 15%,可配置 10-30%
# 小文件阈值
small_file_threshold: 1MB # 1MB 以下视为小文件(强制使用内存缓存)
medium_file_threshold: 10MB # 10MB 以下视为中等文件(根据内存使用率决定)
# 小文件内存缓存策略
small_file_memory_cache: true # 小文件强制使用内存缓存(提升访问速度)
# 淘汰策略
eviction_policy: "lfu_lru" # 淘汰策略:
# - lfu: 最少使用频率Least Frequently Used
# - lru: 最近最少使用Least Recently Used
# - lfu_lru: 混合策略(保留热点和最近访问的数据)
preserve_hot_items: true # 保留热点数据(访问频率高的数据)
preserve_recent_items: true # 保留最近访问的数据(最近 1 小时内访问的数据)

View File

@@ -0,0 +1,58 @@
# GoEdge CDN 缓存优化配置文件
# 此文件用于配置 TTL 缓存和文件缓存的优化参数
# TTL 缓存配置
ttl_cache:
# 分片配置
# pieces: 0 表示自动计算,> 0 表示手动指定分片数
pieces: 0
min_pieces: 64 # 最小分片数
max_pieces: 1024 # 最大分片数
# GC 配置
gc:
base_interval: 2s # 基础 GC 间隔
min_interval: 1s # 最小 GC 间隔(过期率高时使用)
max_interval: 10s # 最大 GC 间隔(过期率低时使用)
adaptive: true # 是否启用自适应 GC
sample_size: 100 # 过期率采样大小
# 文件缓存配置
file_cache:
# SSD 检测配置
auto_detect_ssd: true # 是否自动检测 SSD
ssd_paths: [] # 手动指定 SSD 路径(可选,如:["/mnt/ssd1", "/mnt/ssd2"])
# SSD 优化策略
ssd:
hot_item_threshold: 100 # 访问次数超过此值视为热点数据
write_buffer_size: 128KB # 写入缓冲区大小SSD 随机写性能好)
read_ahead_size: 256KB # 预读大小
sync_interval: 10s # 同步间隔SSD 不需要频繁同步)
open_file_cache_max: 10000 # 文件句柄缓存最大值
# HDD 配置
hdd:
write_buffer_size: 64KB # 写入缓冲区大小
read_ahead_size: 128KB # 预读大小
sync_interval: 1s # 同步间隔
open_file_cache_max: 5000 # 文件句柄缓存最大值
# 内存缓存配置
memory:
# 内存缓存容量(系统内存的百分比)
# 如果未设置,将根据系统内存自动计算(10-20%)
capacity_percent: 15 # 默认 15%,可配置 10-30%
# 小文件阈值
small_file_threshold: 1MB # 1MB 以下视为小文件
medium_file_threshold: 10MB # 10MB 以下视为中等文件
# 小文件内存缓存策略
small_file_memory_cache: true # 小文件强制使用内存缓存
# 淘汰策略
eviction_policy: "lfu_lru" # 淘汰策略lfu, lru, lfu_lru
preserve_hot_items: true # 保留热点数据
preserve_recent_items: true # 保留最近访问的数据

View File

@@ -0,0 +1,4 @@
rpc:
endpoints: [ "" ]
clusterId: ""
secret: ""

View File

@@ -0,0 +1 @@
{"speed":1,"speedMB":1400,"countTests":3}

View File

@@ -0,0 +1 @@
{"22":{"hour":22,"avg":3,"values":[2.95703125]}}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
MANIFEST-000010

View File

@@ -0,0 +1,48 @@
[Version]
pebble_version=0.1
[Options]
bytes_per_sync=1048576
cache_size=8388608
cleaner=delete
compaction_debt_concurrency=1073741824
comparer=leveldb.BytewiseComparator
disable_wal=false
flush_delay_delete_range=0s
flush_delay_range_key=0s
flush_split_bytes=4194304
format_major_version=1
l0_compaction_concurrency=10
l0_compaction_file_threshold=500
l0_compaction_threshold=4
l0_stop_writes_threshold=12
lbase_max_bytes=67108864
max_concurrent_compactions=1
max_manifest_file_size=134217728
max_open_files=1000
mem_table_size=67108864
mem_table_stop_writes_threshold=2
min_deletion_rate=0
merger=pebble.concatenate
read_compaction_rate=16000
read_sampling_multiplier=16
strict_wal_tail=true
table_cache_shards=16
table_property_collectors=[]
validate_on_ingest=false
wal_dir=
wal_bytes_per_sync=0
max_writer_concurrency=0
force_writer_parallelism=false
secondary_cache_size_bytes=0
create_on_shared=0
[Level "0"]
block_restart_interval=16
block_size=4096
block_size_threshold=90
compression=Snappy
filter_policy=none
filter_type=table
index_block_size=4096
target_file_size=2097152

View File

@@ -0,0 +1 @@
[]

1
EdgeNode/build/logs/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.log

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>Error</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>403 Forbidden</h3>
<p>Sorry, your access to the page has been denied. Please try again later.</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>Error</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>404 Not Found</h3>
<p>Sorry, the page you are looking for is not found. Please try again later.</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>Error</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>An error occurred.</h3>
<p>Sorry, the page you are looking for is currently unavailable. Please try again later.</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>Shutdown Notice</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>The website is shutdown.</h3>
<p>Sorry, the page you are looking for is currently unavailable. Please try again later.</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>升级中</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>网站升级中</h3>
<p>为了给您提供更好的服务,我们正在升级网站,请稍后重新访问。</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>临时关闭提醒</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
</head>
<body>
<h3>网站暂时关闭</h3>
<p>网站已被暂时关闭,请耐心等待我们的重新开通通知。</p>
<footer>Powered by GoEdge.</footer>
</body>
</html>

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Run the node directly from source with all plus-edition build tags.
# NOTE(review): sudo appears to be needed for privileged network access
# (low ports / raw packets) — confirm before removing.
sudo go run -tags="plus script packet" ../cmd/edge-node/main.go

14
EdgeNode/build/test.sh Normal file
View File

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Run the whole test suite with an optional build tag.
#   $1 - build tag ("plus" or "community"; defaults to "community")
TAG=${1}
if [ -z "$TAG" ]; then
	TAG="community"
fi
# stop node
# (a running node would hold ports/files the tests need)
go run -tags=${TAG} ../cmd/edge-node/main.go stop
# reference: https://pkg.go.dev/cmd/go/internal/test
# clear cached results so every package is actually re-run
go clean -testcache
go test -timeout 60s -tags="${TAG}" -cover ../...

0
EdgeNode/build/www/.gitignore vendored Normal file
View File

View File

@@ -0,0 +1,9 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Welcome</title>
</head>
<body>
I am index.
</body>
</html>

View File

@@ -0,0 +1,601 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/apps"
"github.com/TeaOSLab/EdgeNode/internal/configs"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/nodes"
"github.com/TeaOSLab/EdgeNode/internal/utils"
executils "github.com/TeaOSLab/EdgeNode/internal/utils/exec"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/maps"
"github.com/iwind/TeaGo/types"
"github.com/iwind/gosock/pkg/gosock"
"gopkg.in/yaml.v3"
"net"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"syscall"
"time"
)
// main is the CLI entry point of the EdgeNode process. It registers every
// sub-command (start/stop/uninstall, diagnostics, IP control, ...) on an
// AppCmd instance and finally starts the node itself. Most sub-commands do
// not run in-process: they talk to an already-running node through a
// temporary unix socket (gosock.NewTmpSock(teaconst.ProcessName)).
func main() {
	var app = apps.NewAppCmd().
		Version(teaconst.Version).
		Product(teaconst.ProductName).
		Usage(teaconst.ProcessName + " [-v|start|stop|restart|status|quit|test|reload|service|daemon|config|pprof|top|accesslog|uninstall]").
		Usage(teaconst.ProcessName + " [trackers|goman|conns|gc|bandwidth|disk|cache.garbage]").
		Usage(teaconst.ProcessName + " [ip.drop|ip.reject|ip.remove|ip.close] IP")
	// before starting, make sure either an API config or a cluster config loads
	app.On("start:before", func() {
		// validate config
		_, err := configs.LoadAPIConfig()
		if err != nil {
			// validate cluster config
			_, clusterErr := configs.LoadClusterConfig()
			if clusterErr != nil { // fail again
				fmt.Println("[ERROR]start failed: load api config from '" + Tea.ConfigFile(configs.ConfigFileName) + "' failed: " + err.Error())
				os.Exit(0)
			}
		}
	})
	// "uninstall": remove the system service, stop the node, delete the
	// installation directory, symlinks and temporary sockets
	app.On("uninstall", func() {
		// service
		fmt.Println("Uninstall service ...")
		var manager = utils.NewServiceManager(teaconst.ProcessName, teaconst.ProductName)
		go func() {
			_ = manager.Uninstall()
		}()
		// stop
		fmt.Println("Stopping ...")
		_, _ = gosock.NewTmpSock(teaconst.ProcessName).SendTimeout(&gosock.Command{Code: "stop"}, 1*time.Second)
		// delete files
		var exe, _ = os.Executable()
		if len(exe) == 0 {
			return
		}
		var dir = filepath.Dir(filepath.Dir(exe)) // ROOT / bin / exe
		// verify dir
		// sanity-check the layout before rm -rf'ing anything
		{
			fmt.Println("Checking '" + dir + "' ...")
			for _, subDir := range []string{"bin/" + filepath.Base(exe), "configs", "logs"} {
				_, err := os.Stat(dir + "/" + subDir)
				if err != nil {
					fmt.Println("[ERROR]program directory structure has been broken, please remove it manually.")
					return
				}
			}
			fmt.Println("Removing '" + dir + "' ...")
			err := os.RemoveAll(dir)
			if err != nil {
				fmt.Println("[ERROR]remove failed: " + err.Error())
			}
		}
		// delete symbolic links
		fmt.Println("Removing symbolic links ...")
		_ = os.Remove("/usr/bin/" + teaconst.ProcessName)
		_ = os.Remove("/var/log/" + teaconst.ProcessName)
		// delete configs
		// nothing to delete for EdgeNode
		// delete sock
		fmt.Println("Removing temporary files ...")
		var tempDir = os.TempDir()
		_ = os.Remove(tempDir + "/" + teaconst.ProcessName + ".sock")
		_ = os.Remove(tempDir + "/" + teaconst.AccessLogSockName)
		// cache ...
		fmt.Println("Please delete cache directories by yourself.")
		// done
		fmt.Println("[DONE]")
	})
	// "test": run the node self-test and print any failure to stderr
	app.On("test", func() {
		err := nodes.NewNode().Test()
		if err != nil {
			_, _ = os.Stderr.WriteString(err.Error())
		}
	})
	// "reload": ask the running node to reload its configuration
	app.On("reload", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "reload"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			var params = maps.NewMap(reply.Params)
			if params.Has("error") {
				fmt.Println("[ERROR]" + params.GetString("error"))
			} else {
				fmt.Println("ok")
			}
		}
	})
	// "daemon": run the node in daemon mode
	app.On("daemon", func() {
		nodes.NewNode().Daemon()
	})
	// "service": register the node as a system service
	app.On("service", func() {
		err := nodes.NewNode().InstallSystemService()
		if err != nil {
			fmt.Println("[ERROR]install failed: " + err.Error())
			return
		}
		fmt.Println("done")
	})
	// "quit": tell the running node to shut down
	app.On("quit", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		_, err := sock.Send(&gosock.Command{Code: "quit"})
		if err != nil {
			fmt.Println("[ERROR]quit failed: " + err.Error())
			return
		}
		fmt.Println("done")
	})
	// "pprof": start the node with a pprof HTTP endpoint
	// (-addr flag, default 127.0.0.1:6060; handlers come from net/http/pprof)
	app.On("pprof", func() {
		var flagSet = flag.NewFlagSet("pprof", flag.ExitOnError)
		var addr string
		flagSet.StringVar(&addr, "addr", "", "")
		_ = flagSet.Parse(os.Args[2:])
		if len(addr) == 0 {
			addr = "127.0.0.1:6060"
		}
		logs.Println("starting with pprof '" + addr + "'...")
		go func() {
			err := http.ListenAndServe(addr, nil)
			if err != nil {
				logs.Println("[ERROR]" + err.Error())
			}
		}()
		var node = nodes.NewNode()
		node.Start()
	})
	// "dbstat": start the node with database statistics enabled
	app.On("dbstat", func() {
		teaconst.EnableDBStat = true
		var node = nodes.NewNode()
		node.Start()
	})
	// "trackers": print the running node's performance tracker labels,
	// sorted by name
	app.On("trackers", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "trackers"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			labelsMap, ok := reply.Params["labels"]
			if ok {
				labels, ok := labelsMap.(map[string]interface{})
				if ok {
					if len(labels) == 0 {
						fmt.Println("no labels yet")
					} else {
						var labelNames = []string{}
						for label := range labels {
							labelNames = append(labelNames, label)
						}
						sort.Strings(labelNames)
						for _, labelName := range labelNames {
							fmt.Println(labelName + ": " + fmt.Sprintf("%.6f", labels[labelName]))
						}
					}
				}
			}
		}
	})
	// "goman": dump the running node's goroutine manager state as JSON
	app.On("goman", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "goman"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			instancesJSON, err := json.MarshalIndent(reply.Params, "", "  ")
			if err != nil {
				fmt.Println("[ERROR]" + err.Error())
			} else {
				fmt.Println(string(instancesJSON))
			}
		}
	})
	// "conns": dump the running node's connection statistics as JSON
	app.On("conns", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "conns"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			resultJSON, err := json.MarshalIndent(reply.Params, "", "  ")
			if err != nil {
				fmt.Println("[ERROR]" + err.Error())
			} else {
				fmt.Println(string(resultJSON))
			}
		}
	})
	// "gc": trigger a garbage collection in the running node and report
	// cost/pause durations when the node returns them
	app.On("gc", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "gc"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			if reply == nil {
				fmt.Println("ok")
			} else {
				var paramMap = maps.NewMap(reply.Params)
				var pauseMS = paramMap.GetFloat64("pauseMS")
				var costMS = paramMap.GetFloat64("costMS")
				fmt.Printf("ok, cost: %.4fms, pause: %.4fms", costMS, pauseMS)
			}
		}
	})
	// "ip.drop IP [--timeout=SECONDS] [--async]": ask the node to drop
	// traffic from an IP, optionally for a limited time
	app.On("ip.drop", func() {
		var args = os.Args[2:]
		if len(args) == 0 {
			fmt.Println("Usage: edge-node ip.drop IP [--timeout=SECONDS] [--async]")
			return
		}
		var ip = args[0]
		if len(net.ParseIP(ip)) == 0 {
			fmt.Println("IP '" + ip + "' is invalid")
			return
		}
		var timeoutSeconds = 0
		var options = app.ParseOptions(args[1:])
		timeout, ok := options["timeout"]
		if ok {
			timeoutSeconds = types.Int(timeout[0])
		}
		var async = false
		_, ok = options["async"]
		if ok {
			async = true
		}
		fmt.Println("drop ip '" + ip + "' for '" + types.String(timeoutSeconds) + "' seconds")
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{
			Code: "dropIP",
			Params: map[string]interface{}{
				"ip":             ip,
				"timeoutSeconds": timeoutSeconds,
				"async":          async,
			},
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			var errString = maps.NewMap(reply.Params).GetString("error")
			if len(errString) > 0 {
				fmt.Println("[ERROR]" + errString)
			} else {
				fmt.Println("ok")
			}
		}
	})
	// "ip.reject IP [--timeout=SECONDS]": ask the node to reject an IP
	app.On("ip.reject", func() {
		var args = os.Args[2:]
		if len(args) == 0 {
			fmt.Println("Usage: edge-node ip.reject IP [--timeout=SECONDS]")
			return
		}
		var ip = args[0]
		if len(net.ParseIP(ip)) == 0 {
			fmt.Println("IP '" + ip + "' is invalid")
			return
		}
		var timeoutSeconds = 0
		var options = app.ParseOptions(args[1:])
		timeout, ok := options["timeout"]
		if ok {
			timeoutSeconds = types.Int(timeout[0])
		}
		fmt.Println("reject ip '" + ip + "' for '" + types.String(timeoutSeconds) + "' seconds")
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{
			Code: "rejectIP",
			Params: map[string]interface{}{
				"ip":             ip,
				"timeoutSeconds": timeoutSeconds,
			},
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			var errString = maps.NewMap(reply.Params).GetString("error")
			if len(errString) > 0 {
				fmt.Println("[ERROR]" + errString)
			} else {
				fmt.Println("ok")
			}
		}
	})
	// "ip.close IP": ask the node to close existing connections from an IP
	app.On("ip.close", func() {
		var args = os.Args[2:]
		if len(args) == 0 {
			fmt.Println("Usage: edge-node ip.close IP")
			return
		}
		var ip = args[0]
		if len(net.ParseIP(ip)) == 0 {
			fmt.Println("IP '" + ip + "' is invalid")
			return
		}
		// NOTE(review): this message is missing the closing single quote
		fmt.Println("close ip '" + ip)
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{
			Code: "closeIP",
			Params: map[string]any{
				"ip": ip,
			},
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			var errString = maps.NewMap(reply.Params).GetString("error")
			if len(errString) > 0 {
				fmt.Println("[ERROR]" + errString)
			} else {
				fmt.Println("ok")
			}
		}
	})
	// "ip.remove IP": ask the node to remove an IP from its lists
	app.On("ip.remove", func() {
		var args = os.Args[2:]
		if len(args) == 0 {
			fmt.Println("Usage: edge-node ip.remove IP")
			return
		}
		var ip = args[0]
		if len(net.ParseIP(ip)) == 0 {
			fmt.Println("IP '" + ip + "' is invalid")
			return
		}
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{
			Code: "removeIP",
			Params: map[string]interface{}{
				"ip": ip,
			},
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
		} else {
			var errString = maps.NewMap(reply.Params).GetString("error")
			if len(errString) > 0 {
				fmt.Println("[ERROR]" + errString)
			} else {
				fmt.Println("ok")
			}
		}
	})
	// "accesslog": ask the node to start streaming access logs, then tail
	// them from the dedicated unix socket until the connection ends
	app.On("accesslog", func() {
		// local sock
		var tmpDir = os.TempDir()
		var sockFile = tmpDir + "/" + teaconst.AccessLogSockName
		_, err := os.Stat(sockFile)
		if err != nil {
			if !os.IsNotExist(err) {
				fmt.Println("[ERROR]" + err.Error())
				return
			}
		}
		var processSock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := processSock.Send(&gosock.Command{
			Code: "accesslog",
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
			return
		}
		if reply.Code == "error" {
			var errString = maps.NewMap(reply.Params).GetString("error")
			if len(errString) > 0 {
				fmt.Println("[ERROR]" + errString)
				return
			}
		}
		conn, err := net.Dial("unix", sockFile)
		if err != nil {
			fmt.Println("[ERROR]start reading access log failed: " + err.Error())
			return
		}
		defer func() {
			_ = conn.Close()
		}()
		// stream raw log bytes to stdout until the node closes the socket
		var buf = make([]byte, 1024)
		for {
			n, err := conn.Read(buf)
			if n > 0 {
				fmt.Print(string(buf[:n]))
			}
			if err != nil {
				break
			}
		}
	})
	// "bandwidth": print the node's bandwidth statistics as JSON
	app.On("bandwidth", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "bandwidth"})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
			return
		}
		var statsMap = maps.NewMap(reply.Params).Get("stats")
		statsJSON, err := json.MarshalIndent(statsMap, "", "  ")
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
			return
		}
		fmt.Println(string(statsJSON))
	})
	// "disk speed": run the local disk speed check (no running node needed)
	app.On("disk", func() {
		var args = os.Args[2:]
		if len(args) > 0 {
			switch args[0] {
			case "speed":
				speedMB, isFast, err := fsutils.CheckDiskIsFast()
				if err != nil {
					fmt.Println("[ERROR]" + err.Error())
				} else {
					fmt.Printf("Speed: %.0fMB/s\n", speedMB)
					if isFast {
						fmt.Println("IsFast: true")
					} else {
						fmt.Println("IsFast: false")
					}
				}
			default:
				fmt.Println("Usage: edge-node disk [speed]")
			}
		} else {
			fmt.Println("Usage: edge-node disk [speed]")
		}
	})
	// "cache.garbage [--delete]": ask the node to scan for orphaned cache
	// files; progress is pushed back on a second socket while we wait
	app.On("cache.garbage", func() {
		fmt.Println("scanning ...")
		var shouldDelete bool
		for _, arg := range os.Args {
			if strings.TrimLeft(arg, "-") == "delete" {
				shouldDelete = true
			}
		}
		var progressSock = gosock.NewTmpSock(teaconst.CacheGarbageSockName)
		progressSock.OnCommand(func(cmd *gosock.Command) {
			var params = maps.NewMap(cmd.Params)
			if cmd.Code == "progress" {
				fmt.Printf("%.2f%%  %d\n", params.GetFloat64("progress")*100, params.GetInt("count"))
				_ = cmd.ReplyOk()
			}
		})
		go func() {
			_ = progressSock.Listen()
		}()
		// give the progress listener a moment to start before the scan
		time.Sleep(1 * time.Second)
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{
			Code:   "cache.garbage",
			Params: map[string]any{"delete": shouldDelete},
		})
		if err != nil {
			fmt.Println("[ERROR]" + err.Error())
			return
		}
		var params = maps.NewMap(reply.Params)
		if params.GetBool("isOk") {
			var count = params.GetInt("count")
			fmt.Println("found", count, "bad caches")
			if count > 0 {
				fmt.Println("======")
				var sampleFiles = params.GetSlice("sampleFiles")
				for _, file := range sampleFiles {
					fmt.Println(types.String(file))
				}
				if count > len(sampleFiles) {
					fmt.Println("... more files")
				}
			}
		} else {
			fmt.Println("[ERROR]" + params.GetString("error"))
		}
	})
	// "config YAML": validate an API config passed as the last argument and
	// write the normalized YAML to configs/api_node.yaml
	app.On("config", func() {
		var configString = os.Args[len(os.Args)-1]
		if configString == "config" {
			fmt.Println("Usage: edge-node config '\nrpc.endpoints: [\"...\"]\nnodeId: \"...\"\nsecret: \"...\"\n'")
			return
		}
		var config = &configs.APIConfig{}
		err := yaml.Unmarshal([]byte(configString), config)
		if err != nil {
			fmt.Println("[ERROR]decode config failed: " + err.Error())
			return
		}
		err = config.Init()
		if err != nil {
			fmt.Println("[ERROR]validate config failed: " + err.Error())
			return
		}
		// marshal again
		// (re-serializing produces a normalized config file)
		configYAML, err := yaml.Marshal(config)
		if err != nil {
			fmt.Println("[ERROR]encode config failed: " + err.Error())
			return
		}
		err = os.WriteFile(Tea.Root+"/configs/api_node.yaml", configYAML, 0666)
		if err != nil {
			fmt.Println("[ERROR]write config failed: " + err.Error())
			return
		}
		fmt.Println("success")
	})
	// "top": look up the running node's pid over the socket and replace this
	// process with the system 'top' command pinned to that pid
	app.On("top", func() {
		var sock = gosock.NewTmpSock(teaconst.ProcessName)
		reply, err := sock.Send(&gosock.Command{Code: "pid"})
		if err != nil {
			fmt.Println("[ERROR]not started yet")
			return
		}
		var pid = maps.NewMap(reply.Params).GetInt("pid")
		if pid <= 0 {
			fmt.Println("[ERROR]invalid pid '" + types.String(pid) + "'")
			return
		}
		topExe, _ := executils.LookPath("top")
		if len(topExe) > 0 {
			// top's pid flag differs between linux (-p) and darwin (-pid)
			if runtime.GOOS == "linux" {
				err = syscall.Exec(topExe, []string{topExe, "-p", types.String(pid)}, os.Environ())
			} else if runtime.GOOS == "darwin" {
				err = syscall.Exec(topExe, []string{topExe, "-pid", types.String(pid)}, os.Environ())
			} else {
				fmt.Println("[ERROR]not supported os '" + runtime.GOOS + "'")
				return
			}
			if err != nil {
				fmt.Println("[ERROR]start failed: " + err.Error())
			}
		} else {
			fmt.Println("[ERROR]could not found 'top' command in this system")
		}
	})
	// default action: start the node
	app.Run(func() {
		var node = nodes.NewNode()
		node.Start()
	})
}

2
EdgeNode/dist/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
*.zip
edge-node

101
EdgeNode/go.mod Normal file
View File

@@ -0,0 +1,101 @@
module github.com/TeaOSLab/EdgeNode
go 1.25
replace (
github.com/TeaOSLab/EdgeCommon => ../EdgeCommon
github.com/dchest/captcha => github.com/iwind/captcha v0.0.0-20231130092438-ae985686ed84
github.com/fsnotify/fsnotify => github.com/iwind/fsnotify v1.5.2-0.20220817040843-193be2051ff4
)
require (
github.com/TeaOSLab/EdgeCommon v0.0.0-00010101000000-000000000000
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/andybalholm/brotli v1.0.5
github.com/aws/aws-sdk-go v1.44.279
github.com/baidubce/bce-sdk-go v0.9.170
github.com/biessek/golang-ico v0.0.0-20180326222316-d348d9ea4670
github.com/cespare/xxhash/v2 v2.3.0
github.com/cockroachdb/pebble v1.1.0
github.com/dchest/captcha v0.0.0-00010101000000-000000000000
github.com/fsnotify/fsnotify v1.7.0
github.com/go-redis/redis/v8 v8.11.5
github.com/google/gopacket v1.1.19
github.com/google/nftables v0.2.0
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible
github.com/iwind/TeaGo v0.0.0-20240411075713-6c1fc9aca7b6
github.com/iwind/gofcgi v0.0.0-20210528023741-a92711d45f11
github.com/iwind/gosock v0.0.0-20211103081026-ee4652210ca4
github.com/iwind/gowebp v0.0.0-20240109104518-489f3429f5c5
github.com/klauspost/compress v1.17.8
github.com/mattn/go-sqlite3 v1.14.17
github.com/mdlayher/netlink v1.7.2
github.com/miekg/dns v1.1.43
github.com/mssola/useragent v1.0.0
github.com/pires/go-proxyproto v0.6.1
github.com/qiniu/go-sdk/v7 v7.16.0
github.com/quic-go/quic-go v0.42.0
github.com/shirou/gopsutil/v3 v3.22.2
github.com/tdewolff/minify/v2 v2.20.20
github.com/tencentyun/cos-go-sdk-v5 v0.7.41
gopkg.in/natefinch/lumberjack.v2 v2.2.1
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f
golang.org/x/image v0.15.0
golang.org/x/net v0.47.0
golang.org/x/sys v0.38.0
google.golang.org/grpc v1.78.0
google.golang.org/protobuf v1.36.10
gopkg.in/yaml.v3 v3.0.1
rogchap.com/v8go v0.8.0
)
require (
github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/native v1.1.0 // indirect
github.com/jsummers/gobmp v0.0.0-20151104160322-e2ba15ffa76e // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mozillazg/go-httpheader v0.2.1 // indirect
github.com/onsi/ginkgo/v2 v2.16.0 // indirect
github.com/oschwald/geoip2-golang v1.13.0 // indirect
github.com/oschwald/maxminddb-golang v1.13.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.51.0 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/tdewolff/parse/v2 v2.7.13 // indirect
github.com/tklauser/go-sysconf v0.3.9 // indirect
github.com/tklauser/numcpus v0.3.0 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.uber.org/mock v0.4.0 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect
)

320
EdgeNode/go.sum Normal file
View File

@@ -0,0 +1,320 @@
github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE=
github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/aws/aws-sdk-go v1.44.279 h1:g23dxnYjIiPlQo0gIKNR0zVPsSvo1bj5frWln+5sfhk=
github.com/aws/aws-sdk-go v1.44.279/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/baidubce/bce-sdk-go v0.9.170 h1:vAr7COuhu6SEf+8f77DVRji45x7TVZtY5kbu9sX7q8g=
github.com/baidubce/bce-sdk-go v0.9.170/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biessek/golang-ico v0.0.0-20180326222316-d348d9ea4670 h1:FQPKKjDhzG0T4ew6dm6MGrXb4PRAi8ZmTuYuxcF62BM=
github.com/biessek/golang-ico v0.0.0-20180326222316-d348d9ea4670/go.mod h1:iRWAFbKXMMkVQyxZ1PfGlkBr1TjATx1zy2MRprV7A3Q=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/nftables v0.2.0 h1:PbJwaBmbVLzpeldoeUKGkE2RjstrjPKMl6oLrfEJ6/8=
github.com/google/nftables v0.2.0/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible h1:XQVXdk+WAJ4fSNB6mMRuYNvFWou7BZs6SZB925hPrnk=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/iwind/TeaGo v0.0.0-20240411075713-6c1fc9aca7b6 h1:dS3pTxrLlDQxdoxSUcHkHnr3LHpsBIXv8v2/xw65RN8=
github.com/iwind/TeaGo v0.0.0-20240411075713-6c1fc9aca7b6/go.mod h1:SfqVbWyIPdVflyA6lMgicZzsoGS8pyeLiTRe8/CIpGI=
github.com/iwind/captcha v0.0.0-20231130092438-ae985686ed84 h1:/RtK8t22a/YFkBWiEwxS+JWcDmxAKsu+r+p00c36K0Q=
github.com/iwind/captcha v0.0.0-20231130092438-ae985686ed84/go.mod h1:7zoElIawLp7GUMLcj54K9kbw+jEyvz2K0FDdRRYhvWo=
github.com/iwind/fsnotify v1.5.2-0.20220817040843-193be2051ff4 h1:PKtXlgNHJhdwl5ozio7KRV3n0SckMw+8ZC2NCpRSv8U=
github.com/iwind/fsnotify v1.5.2-0.20220817040843-193be2051ff4/go.mod h1:DmAukmDY25inGlriLn0B2jidmvaDR1REOcPXwvzvDPI=
github.com/iwind/gofcgi v0.0.0-20210528023741-a92711d45f11 h1:DaQjoWZhLNxjhIXedVg4/vFEtHkZhK4IjIwsWdyzBLg=
github.com/iwind/gofcgi v0.0.0-20210528023741-a92711d45f11/go.mod h1:JtbX20untAjUVjZs1ZBtq80f5rJWvwtQNRL6EnuYRnY=
github.com/iwind/gosock v0.0.0-20211103081026-ee4652210ca4 h1:VWGsCqTzObdlbf7UUE3oceIpcEKi4C/YBUszQXk118A=
github.com/iwind/gosock v0.0.0-20211103081026-ee4652210ca4/go.mod h1:H5Q7SXwbx3a97ecJkaS2sD77gspzE7HFUafBO0peEyA=
github.com/iwind/gowebp v0.0.0-20240109104518-489f3429f5c5 h1:tA0HEDQJ/FM847wc7kVpSgkTfMF1LervEmd2UZQr3Po=
github.com/iwind/gowebp v0.0.0-20240109104518-489f3429f5c5/go.mod h1:AYyXDhbbD7q9N6rJff2jrE7pGupaiyvtv3YeyIAQLXk=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jsummers/gobmp v0.0.0-20151104160322-e2ba15ffa76e h1:LvL4XsI70QxOGHed6yhQtAU34Kx3Qq2wwBzGFKY8zKk=
github.com/jsummers/gobmp v0.0.0-20151104160322-e2ba15ffa76e/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ=
github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60=
github.com/mssola/useragent v1.0.0 h1:WRlDpXyxHDNfvZaPEut5Biveq86Ze4o4EMffyMxmH5o=
github.com/mssola/useragent v1.0.0/go.mod h1:hz9Cqz4RXusgg1EdI4Al0INR62kP7aPSRNHnpU+b85Y=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/oschwald/geoip2-golang v1.13.0 h1:Q44/Ldc703pasJeP5V9+aFSZFmBN7DKHbNsSFzQATJI=
github.com/oschwald/geoip2-golang v1.13.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw=
github.com/pires/go-proxyproto v0.6.1/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/common v0.51.0 h1:vT5R9NAlW4V6k8Wruk7ikrHaHRsrPbduM/cKTOdQM/k=
github.com/prometheus/common v0.51.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk=
github.com/qiniu/go-sdk/v7 v7.16.0 h1:Jt4YOMLuaDfgb/KdVg0O1fYLpv5MDkYe/zV+Ri7gWRs=
github.com/qiniu/go-sdk/v7 v7.16.0/go.mod h1:nqoYCNo53ZlGA521RvRethvxUDvXKt4gtYXOwye868w=
github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM=
github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks=
github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tdewolff/minify/v2 v2.20.20 h1:vhULb+VsW2twkplgsawAoUY957efb+EdiZ7zu5fUhhk=
github.com/tdewolff/minify/v2 v2.20.20/go.mod h1:GYaLXFpIIwsX99apQHXfGdISUdlA98wmaoWxjT9C37k=
github.com/tdewolff/parse/v2 v2.7.13 h1:iSiwOUkCYLNfapHoqdLcqZVgvQ0jrsao8YYKP/UJYTI=
github.com/tdewolff/parse/v2 v2.7.13/go.mod h1:3FbJWZp3XT9OWVN3Hmfp0p/a08v4h8J9W1aghka0soA=
github.com/tdewolff/test v1.0.11-0.20231101010635-f1265d231d52/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739 h1:IkjBCtQOOjIn03u/dMQK9g+Iw9ewps4mCl1nB8Sscbo=
github.com/tdewolff/test v1.0.11-0.20240106005702-7de5f7df4739/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4=
github.com/tencentyun/cos-go-sdk-v5 v0.7.41 h1:iU0Li/Np78H4SBna0ECQoF3mpgi6ImLXU+doGzPFXGc=
github.com/tencentyun/cos-go-sdk-v5 v0.7.41/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8=
golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rogchap.com/v8go v0.8.0 h1:/crDEiga68kOtbIqw3K9Rt9OztYz0LhAPHm2e3wK7Q4=
rogchap.com/v8go v0.8.0/go.mod h1:MxgP3pL2MW4dpme/72QRs8sgNMmM0pRc8DPhcuLWPAs=

View File

@@ -0,0 +1,302 @@
package accesslogs
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"gopkg.in/natefinch/lumberjack.v2"
)
var (
	// sharedFileWriter is the process-wide FileWriter instance.
	sharedFileWriter *FileWriter
	// sharedOnce guards the lazy creation of sharedFileWriter.
	sharedOnce sync.Once
)

// SharedFileWriter returns the global local log file writer (singleton).
// The writer is created lazily on first call and reused afterwards.
func SharedFileWriter() *FileWriter {
	sharedOnce.Do(func() {
		sharedFileWriter = NewFileWriter()
	})
	return sharedFileWriter
}
const (
	// defaultLogDir is the fallback directory for local access logs.
	defaultLogDir = "/var/log/edge/edge-node"
	// envLogDir is the environment variable that overrides defaultLogDir.
	envLogDir = "EDGE_LOG_DIR"
)
// FileWriter writes access/WAF/error logs as JSON Lines into local files so
// that Fluent Bit can tail and collect them.
// File rotation is handled internally by lumberjack.
type FileWriter struct {
	dir          string                        // target log directory
	mu           sync.Mutex                    // guards files/inited/dir/rotateConfig
	files        map[string]*lumberjack.Logger // access.log, waf.log, error.log
	rotateConfig *serverconfigs.AccessLogRotateConfig
	inited       bool // whether directory/files have been prepared
}

// NewFileWriter creates a local log file writer using the default directory
// (EDGE_LOG_DIR env var, or the built-in default) and default rotation config.
func NewFileWriter() *FileWriter {
	dir := resolveDefaultLogDir()
	return &FileWriter{
		dir:          dir,
		files:        make(map[string]*lumberjack.Logger),
		rotateConfig: serverconfigs.NewDefaultAccessLogRotateConfig(),
	}
}
// resolveDefaultLogDir returns the log directory configured via the
// EDGE_LOG_DIR environment variable, falling back to the compiled-in default
// when the variable is unset or blank.
func resolveDefaultLogDir() string {
	if dir := strings.TrimSpace(os.Getenv(envLogDir)); dir != "" {
		return dir
	}
	return defaultLogDir
}
// resolveDirFromPolicyPath derives a log directory from an access-log policy
// path. Rules:
//   - empty input yields ""
//   - a trailing slash or backslash marks an explicit directory
//   - a base name containing "." or "${" looks like a file name or a template
//     variable, so the parent directory is used
//   - anything else is treated as a directory itself
func resolveDirFromPolicyPath(policyPath string) string {
	var trimmed = strings.TrimSpace(policyPath)
	if trimmed == "" {
		return ""
	}
	if strings.HasSuffix(trimmed, "/") || strings.HasSuffix(trimmed, "\\") {
		return filepath.Clean(trimmed)
	}
	var base = filepath.Base(trimmed)
	if strings.Contains(base, ".") || strings.Contains(base, "${") {
		// looks like a file name (or templated file name): use its directory
		return filepath.Clean(filepath.Dir(trimmed))
	}
	return filepath.Clean(trimmed)
}
// Dir returns the currently configured log directory.
func (w *FileWriter) Dir() string {
	return w.dir
}

// SetDirByPolicyPath updates the directory from a public access-log policy
// path; an empty/unsuitable path falls back to EDGE_LOG_DIR or the default.
func (w *FileWriter) SetDirByPolicyPath(policyPath string) {
	dir := resolveDirFromPolicyPath(policyPath)
	w.SetDir(dir)
}
// SetDir updates the log directory and resets all open file handles so they
// are re-created lazily under the new directory. An empty dir falls back to
// the default resolution (EDGE_LOG_DIR or the built-in default).
func (w *FileWriter) SetDir(dir string) {
	if strings.TrimSpace(dir) == "" {
		dir = resolveDefaultLogDir()
	}
	w.mu.Lock()
	defer w.mu.Unlock()
	// no-op when the directory is unchanged
	if dir == w.dir {
		return
	}
	// close existing writers; they will be re-opened by init() on next write
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
		}
		w.files[name] = nil
	}
	w.inited = false
	w.dir = dir
}
// SetRotateConfig updates the rotation config and rebuilds the writers.
// When the normalized config equals the current one, nothing happens.
func (w *FileWriter) SetRotateConfig(config *serverconfigs.AccessLogRotateConfig) {
	normalized := config.Normalize()
	w.mu.Lock()
	defer w.mu.Unlock()
	if equalRotateConfig(w.rotateConfig, normalized) {
		return
	}
	// close existing writers so init() re-creates them with the new settings
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
		}
		w.files[name] = nil
	}
	w.inited = false
	w.rotateConfig = normalized
}
// IsEnabled reports whether writing to disk is enabled
// (a non-empty directory counts as enabled).
func (w *FileWriter) IsEnabled() bool {
	return w.dir != ""
}

// EnsureInit pre-creates the log directory and empty log files at startup so
// Fluent Bit can start tailing immediately, without waiting for the first
// access log entry.
func (w *FileWriter) EnsureInit() error {
	if w.dir == "" {
		return nil
	}
	return w.init()
}
// init makes sure the directory exists and the three log writers are open.
// It is idempotent: after the first successful run (or until SetDir/
// SetRotateConfig/Reopen invalidates the state) it returns immediately.
func (w *FileWriter) init() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.inited && len(w.files) > 0 {
		return nil
	}
	if w.dir == "" {
		return nil
	}
	if err := os.MkdirAll(w.dir, 0755); err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "mkdir log dir failed: "+err.Error())
		return err
	}
	// create any missing writers; existing ones are kept as-is
	for _, name := range []string{"access.log", "waf.log", "error.log"} {
		if w.files[name] != nil {
			continue
		}
		w.files[name] = w.newLogger(name)
	}
	w.inited = true
	return nil
}
// newLogger builds a lumberjack writer for fileName inside w.dir using the
// current (normalized) rotation configuration.
// Compress/LocalTime are dereferenced directly: Normalize is expected to
// populate these pointers — TODO confirm against AccessLogRotateConfig.Normalize.
func (w *FileWriter) newLogger(fileName string) *lumberjack.Logger {
	rotateConfig := w.rotateConfig.Normalize()
	return &lumberjack.Logger{
		Filename:   filepath.Join(w.dir, fileName),
		MaxSize:    rotateConfig.MaxSizeMB,
		MaxBackups: rotateConfig.MaxBackups,
		MaxAge:     rotateConfig.MaxAgeDays,
		Compress:   *rotateConfig.Compress,
		LocalTime:  *rotateConfig.LocalTime,
	}
}
// Write appends one access log entry, routed by log_type, to the matching
// file (access.log / waf.log / error.log). Errors are reported via
// remotelogs and never propagated to the caller.
func (w *FileWriter) Write(l *pb.HTTPAccessLog, clusterId int64) {
	if w.dir == "" {
		return
	}
	if err := w.init(); err != nil || len(w.files) == 0 {
		return
	}
	ingest, logType := FromHTTPAccessLog(l, clusterId)
	line, err := json.Marshal(ingest)
	if err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "marshal ingest log: "+err.Error())
		return
	}
	// pick the target file by log type; everything else goes to access.log
	var fileName string
	switch logType {
	case LogTypeWAF:
		fileName = "waf.log"
	case LogTypeError:
		fileName = "error.log"
	default:
		fileName = "access.log"
	}
	// fetch the handle under the lock; the write itself relies on
	// lumberjack's own internal locking
	w.mu.Lock()
	file := w.files[fileName]
	w.mu.Unlock()
	if file == nil {
		return
	}
	_, err = file.Write(append(line, '\n'))
	if err != nil {
		remotelogs.Error("ACCESS_LOG_FILE", "write "+fileName+" failed: "+err.Error())
	}
}
// WriteBatch writes many entries at once, snapshotting the three file handles
// a single time to reduce lock contention. Entries that fail to marshal are
// silently skipped.
func (w *FileWriter) WriteBatch(logs []*pb.HTTPAccessLog, clusterId int64) {
	if w.dir == "" || len(logs) == 0 {
		return
	}
	if err := w.init(); err != nil || len(w.files) == 0 {
		return
	}
	// snapshot handles once for the whole batch
	w.mu.Lock()
	accessFile := w.files["access.log"]
	wafFile := w.files["waf.log"]
	errorFile := w.files["error.log"]
	w.mu.Unlock()
	if accessFile == nil && wafFile == nil && errorFile == nil {
		return
	}
	for _, logItem := range logs {
		ingest, logType := FromHTTPAccessLog(logItem, clusterId)
		line, err := json.Marshal(ingest)
		if err != nil {
			// skip unmarshalable entries; batch writing is best-effort
			continue
		}
		line = append(line, '\n')
		var file *lumberjack.Logger
		switch logType {
		case LogTypeWAF:
			file = wafFile
		case LogTypeError:
			file = errorFile
		default:
			file = accessFile
		}
		if file != nil {
			_, _ = file.Write(line)
		}
	}
}
// Reopen closes and re-creates all log writers (kept for SIGHUP
// compatibility, e.g. after external log rotation).
func (w *FileWriter) Reopen() error {
	if w.dir == "" {
		return nil
	}
	w.mu.Lock()
	for name, file := range w.files {
		if file != nil {
			_ = file.Close()
			w.files[name] = nil
		}
	}
	w.inited = false
	// unlock before init(), which takes the same mutex
	w.mu.Unlock()
	return w.init()
}
// Close closes every open log file and returns the last close error, if any.
// Individual close failures are also reported via remotelogs.
func (w *FileWriter) Close() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	var lastErr error
	for name, file := range w.files {
		if file != nil {
			if err := file.Close(); err != nil {
				lastErr = err
				remotelogs.Error("ACCESS_LOG_FILE", fmt.Sprintf("close %s: %v", name, err))
			}
			w.files[name] = nil
		}
	}
	w.inited = false
	return lastErr
}
// equalRotateConfig reports whether two rotate configs are equivalent.
// Both arguments are normally produced by Normalize(), but the *bool fields
// are still compared nil-safely so a partially populated config cannot
// trigger a nil-pointer dereference.
func equalRotateConfig(left *serverconfigs.AccessLogRotateConfig, right *serverconfigs.AccessLogRotateConfig) bool {
	if left == nil || right == nil {
		return left == right
	}
	// nil-safe comparison for optional *bool fields
	var equalBoolPtr = func(a *bool, b *bool) bool {
		if a == nil || b == nil {
			return a == b
		}
		return *a == *b
	}
	return left.MaxSizeMB == right.MaxSizeMB &&
		left.MaxBackups == right.MaxBackups &&
		left.MaxAgeDays == right.MaxAgeDays &&
		equalBoolPtr(left.Compress, right.Compress) &&
		equalBoolPtr(left.LocalTime, right.LocalTime)
}

View File

@@ -0,0 +1,137 @@
// Package accesslogs 提供边缘节点访问日志落盘JSON Lines供 Fluent Bit 采集写入 ClickHouse。
package accesslogs
import (
"encoding/json"
"github.com/TeaOSLab/EdgeCommon/pkg/rpc/pb"
)
// Log types, kept identical to the log_type values used by
// Fluent Bit / the logs_ingest pipeline.
const (
	LogTypeAccess = "access"
	LogTypeWAF    = "waf"
	LogTypeError  = "error"
)

// maxBodyLen is the maximum request/response body length (in bytes) written
// to disk; longer bodies are truncated to keep single records small.
const maxBodyLen = 512 * 1024
// IngestLog is the single-line JSON record; its fields are aligned with the
// design document and the ClickHouse logs_ingest table schema.
type IngestLog struct {
	Timestamp int64  `json:"timestamp"` // unix timestamp (seconds)
	NodeId    int64  `json:"node_id"`
	ClusterId int64  `json:"cluster_id"`
	ServerId  int64  `json:"server_id"`
	Host      string `json:"host"`
	IP        string `json:"ip"` // client IP (raw remote addr preferred)
	Method    string `json:"method"`
	Path      string `json:"path"` // request URI including query string
	Status    int32  `json:"status"`
	BytesIn   int64  `json:"bytes_in"`
	BytesOut  int64  `json:"bytes_out"`
	CostMs    int64  `json:"cost_ms"` // request handling time in milliseconds
	UA        string `json:"ua"`
	Referer   string `json:"referer"`
	LogType   string `json:"log_type"` // one of access / waf / error
	TraceId   string `json:"trace_id,omitempty"`
	// WAF-related fields, only set when a firewall policy matched
	FirewallPolicyId    int64 `json:"firewall_policy_id,omitempty"`
	FirewallRuleGroupId int64 `json:"firewall_rule_group_id,omitempty"`
	FirewallRuleSetId   int64 `json:"firewall_rule_set_id,omitempty"`
	FirewallRuleId      int64 `json:"firewall_rule_id,omitempty"`
	// headers/bodies serialized as JSON strings (bodies truncated to maxBodyLen)
	RequestHeaders  string `json:"request_headers,omitempty"`
	RequestBody     string `json:"request_body,omitempty"`
	ResponseHeaders string `json:"response_headers,omitempty"`
	ResponseBody    string `json:"response_body,omitempty"`
}
// stringsMapToJSON flattens a map[string]*Strings into a JSON object string
// for on-disk / ClickHouse storage. Only the FIRST value of each multi-value
// header is kept; empty maps yield "".
func stringsMapToJSON(m map[string]*pb.Strings) string {
	if len(m) == 0 {
		return ""
	}
	out := make(map[string]string, len(m))
	for k, v := range m {
		if v != nil && len(v.Values) > 0 {
			out[k] = v.Values[0]
		}
	}
	if len(out) == 0 {
		return ""
	}
	// marshal error deliberately ignored: map[string]string cannot fail
	b, _ := json.Marshal(out)
	return string(b)
}
// truncateBody converts b to a string, truncating it to at most maxBodyLen
// bytes. The slice is truncated BEFORE the string conversion so that an
// oversized body is never fully copied into a throwaway string.
// Note: truncation is byte-based and may split a multi-byte UTF-8 rune at
// the boundary — acceptable for log storage.
func truncateBody(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	if len(b) > maxBodyLen {
		b = b[:maxBodyLen]
	}
	return string(b)
}
// buildRequestBody merges the query string and the request body into the
// request_body field (no extra schema field is introduced). When both are
// present they are joined with a newline; the result is truncated to
// maxBodyLen bytes.
func buildRequestBody(l *pb.HTTPAccessLog) string {
	q := l.GetQueryString()
	body := l.GetRequestBody()
	if len(q) == 0 && len(body) == 0 {
		return ""
	}
	if len(body) == 0 {
		return truncateBody([]byte(q))
	}
	// query string first, then body, separated by a single '\n'
	combined := make([]byte, 0, len(q)+1+len(body))
	combined = append(combined, q...)
	combined = append(combined, '\n')
	combined = append(combined, body...)
	return truncateBody(combined)
}
// FromHTTPAccessLog converts a pb.HTTPAccessLog into an IngestLog and decides
// its log_type. Priority: waf > error > access; WAF entries are detected via
// FirewallPolicyId, error entries via the Errors list.
func FromHTTPAccessLog(l *pb.HTTPAccessLog, clusterId int64) (ingest IngestLog, logType string) {
	ingest = IngestLog{
		Timestamp:           l.GetTimestamp(),
		NodeId:              l.GetNodeId(),
		ClusterId:           clusterId,
		ServerId:            l.GetServerId(),
		Host:                l.GetHost(),
		IP:                  l.GetRawRemoteAddr(),
		Method:              l.GetRequestMethod(),
		Path:                l.GetRequestURI(), // RequestURI is used so the query string is included
		Status:              l.GetStatus(),
		BytesIn:             l.GetRequestLength(),
		BytesOut:            l.GetBytesSent(),
		CostMs:              int64(l.GetRequestTime() * 1000), // seconds -> milliseconds
		UA:                  l.GetUserAgent(),
		Referer:             l.GetReferer(),
		TraceId:             l.GetRequestId(),
		FirewallPolicyId:    l.GetFirewallPolicyId(),
		FirewallRuleGroupId: l.GetFirewallRuleGroupId(),
		FirewallRuleSetId:   l.GetFirewallRuleSetId(),
		FirewallRuleId:      l.GetFirewallRuleId(),
		RequestHeaders:      stringsMapToJSON(l.GetHeader()),
		RequestBody:         buildRequestBody(l),
		ResponseHeaders:     stringsMapToJSON(l.GetSentHeader()),
	}
	// fall back to the (possibly proxied) remote addr when the raw one is empty
	if ingest.IP == "" {
		ingest.IP = l.GetRemoteAddr()
	}
	// the pb message currently carries no response body; assign here if added later
	// ingest.ResponseBody = ...
	// consistent with the design: waf > error > access; attack logs are
	// detected via firewall_rule_id / firewall_policy_id
	if l.GetFirewallPolicyId() > 0 {
		logType = LogTypeWAF
	} else if len(l.GetErrors()) > 0 {
		logType = LogTypeError
	} else {
		logType = LogTypeAccess
	}
	ingest.LogType = logType
	return ingest, logType
}

View File

@@ -0,0 +1,349 @@
package apps
import (
"errors"
"fmt"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
executils "github.com/TeaOSLab/EdgeNode/internal/utils/exec"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/maps"
"github.com/iwind/TeaGo/types"
"github.com/iwind/gosock/pkg/gosock"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
)
// AppCmd implements the shared command line helper (help/version/start/stop/
// restart/status plus custom directives) for the edge programs.
type AppCmd struct {
	product       string               // product name shown in banners
	version       string               // product version string
	usages        []string             // usage lines for help output
	options       []*CommandHelpOption // registered option descriptions
	appendStrings []string             // extra text appended to help output
	directives    []*Directive         // custom sub-command handlers
	sock          *gosock.Sock         // control socket of the running process
}

// NewAppCmd creates a command helper bound to the process control socket.
func NewAppCmd() *AppCmd {
	return &AppCmd{
		sock: gosock.NewTmpSock(teaconst.ProcessName),
	}
}

// CommandHelpOption describes one command line option in the help output.
type CommandHelpOption struct {
	Code        string
	Description string
}
// Product sets the product name and returns the receiver for chaining.
func (this *AppCmd) Product(product string) *AppCmd {
	this.product = product
	return this
}

// Version sets the product version and returns the receiver for chaining.
func (this *AppCmd) Version(version string) *AppCmd {
	this.version = version
	return this
}

// Usage appends one usage line shown by the help output.
func (this *AppCmd) Usage(usage string) *AppCmd {
	this.usages = append(this.usages, usage)
	return this
}

// Option registers an option (code + description) for the help output.
func (this *AppCmd) Option(code string, description string) *AppCmd {
	this.options = append(this.options, &CommandHelpOption{
		Code:        code,
		Description: description,
	})
	return this
}

// Append adds extra text printed after the options in the help output.
func (this *AppCmd) Append(appendString string) *AppCmd {
	this.appendStrings = append(this.appendStrings, appendString)
	return this
}
// Print writes the product banner, usage lines, options and appended notes
// to stdout. Unlike the previous implementation it no longer clears
// option.Code in place, so repeated calls produce identical output.
func (this *AppCmd) Print() {
	fmt.Println(this.product + " v" + this.version)
	fmt.Println("Usage:")
	for _, usage := range this.usages {
		fmt.Println(" " + usage)
	}
	if len(this.options) > 0 {
		fmt.Println("")
		fmt.Println("Options:")
		// column width for aligned descriptions: at least 20, grown to fit
		// the longest option code below 40 characters
		var spaces = 20
		var max = 40
		for _, option := range this.options {
			l := len(option.Code)
			if l < max && l > spaces {
				spaces = l + 4
			}
		}
		for _, option := range this.options {
			var code = option.Code
			if len(code) > max {
				// very long codes get their own line; the aligned column stays empty
				fmt.Println("")
				fmt.Println(" " + code)
				code = ""
			}
			fmt.Printf(" %-"+strconv.Itoa(spaces)+"s%s\n", code, ": "+option.Description)
		}
	}
	if len(this.appendStrings) > 0 {
		fmt.Println("")
		for _, s := range this.appendStrings {
			fmt.Println(s)
		}
	}
}
// On registers a callback that runs when the given argument is passed on the
// command line (also used for ":before" hooks, see Run).
func (this *AppCmd) On(arg string, callback func()) {
	var directive = &Directive{
		Arg:      arg,
		Callback: callback,
	}
	this.directives = append(this.directives, directive)
}
// Run dispatches the command line: built-in commands (version/help/start/
// stop/restart/status) and custom directives are handled first; with no
// arguments the log writer is installed and main() is executed.
// Before dispatching, a "<arg>:before" directive is invoked if registered.
func (this *AppCmd) Run(main func()) {
	// read command line arguments
	var args = os.Args[1:]
	if len(args) > 0 {
		var mainArg = args[0]
		// run the optional pre-hook for this argument
		this.callDirective(mainArg + ":before")
		switch mainArg {
		case "-v", "version", "-version", "--version":
			this.runVersion()
			return
		case "?", "help", "-help", "h", "-h":
			this.runHelp()
			return
		case "start":
			this.runStart()
			return
		case "stop":
			this.runStop()
			return
		case "restart":
			this.runRestart()
			return
		case "status":
			this.runStatus()
			return
		}
		// look up custom directives
		for _, directive := range this.directives {
			if directive.Arg == mainArg {
				directive.Callback()
				return
			}
		}
		fmt.Println("unknown command '" + mainArg + "'")
		return
	}
	// install the asynchronous run.log writer
	var writer = new(LogWriter)
	writer.Init()
	logs.SetWriter(writer)
	// run the main function
	main()
}
// runVersion prints the product version together with build information.
func (this *AppCmd) runVersion() {
	fmt.Println(this.product+" v"+this.version, "(build: "+runtime.Version(), runtime.GOOS, runtime.GOARCH, teaconst.Tag+")")
}

// runHelp prints the full help output.
func (this *AppCmd) runHelp() {
	this.Print()
}
// runStart launches the program as a detached background process. A no-op if
// the process is already running (detected via the control socket).
func (this *AppCmd) runStart() {
	var pid = this.getPID()
	if pid > 0 {
		fmt.Println(this.product+" already started, pid:", pid)
		return
	}
	// mark the child as a background instance (read by LogWriter.Write)
	_ = os.Setenv("EdgeBackground", "on")
	var cmd = exec.Command(this.exe())
	// detach from the current session (platform-specific)
	configureSysProcAttr(cmd)
	err := cmd.Start()
	if err != nil {
		fmt.Println(this.product+" start failed:", err.Error())
		return
	}
	// create symbolic links
	_ = this.createSymLinks()
	fmt.Println(this.product+" started ok, pid:", cmd.Process.Pid)
}
// runStop stops the running instance: first via systemd (asynchronously, as
// systemctl may take long), then via a "stop" command on the control socket.
// NOTE(review): the success message is printed without waiting for either
// path to confirm shutdown — best-effort by design.
func (this *AppCmd) runStop() {
	var pid = this.getPID()
	if pid == 0 {
		fmt.Println(this.product + " not started yet")
		return
	}
	// stop via systemd
	if runtime.GOOS == "linux" {
		systemctl, _ := executils.LookPath("systemctl")
		if len(systemctl) > 0 {
			go func() {
				// may take a long time, so do not block the current process
				_ = exec.Command(systemctl, "stop", teaconst.SystemdServiceName).Run()
			}()
		}
	}
	// if still running, send the stop command over the control socket
	_, _ = this.sock.SendTimeout(&gosock.Command{Code: "stop"}, 1*time.Second)
	fmt.Println(this.product+" stopped ok, pid:", types.String(pid))
}
// runRestart stops the running instance, waits one second, then starts it again.
func (this *AppCmd) runRestart() {
	this.runStop()
	time.Sleep(1 * time.Second)
	this.runStart()
}

// runStatus prints whether the program is currently running and its PID.
func (this *AppCmd) runStatus() {
	var pid = this.getPID()
	if pid == 0 {
		fmt.Println(this.product + " not started yet")
		return
	}
	fmt.Println(this.product + " is running, pid: " + types.String(pid))
}
// getPID asks the running instance for its PID via the control socket.
// Returns 0 when no instance is listening or the query fails.
func (this *AppCmd) getPID() int {
	if !this.sock.IsListening() {
		return 0
	}
	reply, err := this.sock.Send(&gosock.Command{Code: "pid"})
	if err != nil {
		return 0
	}
	return maps.NewMap(reply.Params).GetInt("pid")
}
// ParseOptions parses command line arguments of the form "--key=value" (or
// bare "--flag") into a map of key to the list of values seen, preserving
// argument order per key. Leading dashes/spaces are stripped from keys, and
// keys as well as values are trimmed.
func (this *AppCmd) ParseOptions(args []string) map[string][]string {
	var result = map[string][]string{}
	for _, arg := range args {
		name, rawValue, hasValue := strings.Cut(arg, "=")
		name = strings.TrimSpace(strings.TrimLeft(name, "- "))
		var value string
		if hasValue {
			value = strings.TrimSpace(rawValue)
		}
		result[name] = append(result[name], value)
	}
	return result
}
// exe returns the path of the current executable, falling back to the raw
// invocation argument when the path cannot be resolved.
func (this *AppCmd) exe() string {
	path, _ := os.Executable()
	if path == "" {
		return os.Args[0]
	}
	return path
}
// createSymLinks creates Linux convenience symlinks:
//   - /usr/bin/<process>       -> current executable
//   - /var/log/<process>.log   -> <install>/logs/run.log
//
// Existing links pointing elsewhere are replaced. All failures are collected
// and returned as one joined error; non-Linux systems are a no-op.
func (this *AppCmd) createSymLinks() error {
	if runtime.GOOS != "linux" {
		return nil
	}
	var exe, _ = os.Executable()
	if len(exe) == 0 {
		return nil
	}
	var errorList = []string{}
	// bin
	{
		var target = "/usr/bin/" + teaconst.ProcessName
		old, _ := filepath.EvalSymlinks(target)
		if old != exe {
			// replace a stale/foreign link with one to the current executable
			_ = os.Remove(target)
			err := os.Symlink(exe, target)
			if err != nil {
				errorList = append(errorList, err.Error())
			}
		}
	}
	// log
	{
		// run.log lives two levels above the binary: <install>/logs/run.log
		var realPath = filepath.Dir(filepath.Dir(exe)) + "/logs/run.log"
		var target = "/var/log/" + teaconst.ProcessName + ".log"
		old, _ := filepath.EvalSymlinks(target)
		if old != realPath {
			_ = os.Remove(target)
			err := os.Symlink(realPath, target)
			if err != nil {
				errorList = append(errorList, err.Error())
			}
		}
	}
	if len(errorList) > 0 {
		return errors.New(strings.Join(errorList, "\n"))
	}
	return nil
}
// callDirective invokes the callback registered under code, if any.
// Only the first matching directive is executed.
func (this *AppCmd) callDirective(code string) {
	for _, d := range this.directives {
		if d.Arg != code {
			continue
		}
		if d.Callback != nil {
			d.Callback()
		}
		return
	}
}

View File

@@ -0,0 +1,15 @@
//go:build !windows
package apps
import (
"os/exec"
"syscall"
)
// configureSysProcAttr detaches the child process on non-Windows systems:
// Setsid starts it in a new session so it survives the launching terminal.
func configureSysProcAttr(cmd *exec.Cmd) {
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Foreground: false,
		Setsid:     true,
	}
}

View File

@@ -0,0 +1,6 @@
package apps
// Directive binds a command line argument to a callback (see AppCmd.On).
type Directive struct {
	Arg      string // the command line argument that triggers the callback
	Callback func() // handler to run; may be nil
}

View File

@@ -0,0 +1,111 @@
package apps
import (
"github.com/TeaOSLab/EdgeNode/internal/utils"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/files"
timeutil "github.com/iwind/TeaGo/utils/time"
"log"
"os"
"runtime"
"strconv"
"strings"
)
// LogWriter asynchronously appends runtime log messages to logs/run.log.
type LogWriter struct {
	fp *os.File    // opened run.log (nil when opening failed)
	c  chan string // buffered queue of pending messages
}
// Init prepares the log directory, opens run.log and starts the background
// goroutine that drains the message queue into the file. When the file grows
// beyond 128 MiB it is truncated to zero rather than rotated.
func (this *LogWriter) Init() {
	// create the log directory
	var dir = files.NewFile(Tea.LogDir())
	if !dir.Exists() {
		err := dir.Mkdir()
		if err != nil {
			log.Println("[LOG]create log dir failed: " + err.Error())
		}
	}
	// open the target log file
	var logPath = Tea.LogFile("run.log")
	fp, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Println("[LOG]open log file failed: " + err.Error())
	} else {
		this.fp = fp
	}
	this.c = make(chan string, 1024)
	// write to the file asynchronously
	var maxFileSize int64 = 128 << 20 // maximum file size; the file is cleared beyond this
	if fp != nil {
		goman.New(func() {
			// start from the current file size so truncation accounting is correct
			var totalSize int64 = 0
			stat, err := fp.Stat()
			if err == nil {
				totalSize = stat.Size()
			}
			for message := range this.c {
				totalSize += int64(len(message))
				_, err := fp.WriteString(timeutil.Format("Y/m/d H:i:s ") + message + "\n")
				if err != nil {
					log.Println("[LOG]write log failed: " + err.Error())
				} else {
					// truncate when the file gets too large
					if totalSize > maxFileSize {
						_ = fp.Truncate(0)
						totalSize = 0
					}
				}
			}
		})
	}
}
// Write queues one message for the file writer. Outside background mode the
// message is also echoed to stdout (with file:line info while testing).
// When the queue is full the message is DROPPED silently — logging is
// deliberately best-effort and must never block callers.
func (this *LogWriter) Write(message string) {
	backgroundEnv, _ := os.LookupEnv("EdgeBackground")
	if backgroundEnv != "on" {
		// resolve caller file and line number (testing only)
		var file string
		var line int
		if Tea.IsTesting() {
			var callDepth = 3
			var ok bool
			_, file, line, ok = runtime.Caller(callDepth)
			if ok {
				file = utils.RemoveWorkspace(this.packagePath(file))
			}
		}
		if len(file) > 0 {
			log.Println(message + " (" + file + ":" + strconv.Itoa(line) + ")")
		} else {
			log.Println(message)
		}
	}
	// non-blocking enqueue; drop when the buffer is full
	select {
	case this.c <- message:
	default:
	}
}
// Close closes the log file and the message channel.
// NOTE(review): the file is closed before the channel, so the drain goroutine
// may still attempt writes to the closed file, and a concurrent Write could
// send on the closed channel and panic — confirm Close is only called during
// final shutdown when no more Write calls can occur.
func (this *LogWriter) Close() {
	if this.fp != nil {
		_ = this.fp.Close()
	}
	close(this.c)
}
// packagePath shortens a slash-separated source path to its last two
// segments (package directory + file name) for compact log output; shorter
// paths are returned unchanged.
func (this *LogWriter) packagePath(path string) string {
	var lastSlash = strings.LastIndex(path, "/")
	if lastSlash < 0 {
		return path
	}
	var prevSlash = strings.LastIndex(path[:lastSlash], "/")
	if prevSlash < 0 {
		return path
	}
	return path[prevSlash+1:]
}

View File

@@ -0,0 +1,11 @@
// Copyright 2023 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package apps
import teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
// RunMain executes f only in the main process (skipped in helper/child
// processes, as flagged by teaconst.IsMain).
func RunMain(f func()) {
	if teaconst.IsMain {
		f()
	}
}

View File

@@ -0,0 +1,11 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// Cache key suffixes. Note that SuffixAll, SuffixCompression and SuffixMethod
// intentionally share the same "@GOEDGE_" prefix value: the latter two are
// used as prefixes that callers complete with an encoding or request method.
const (
	SuffixAll         = "@GOEDGE_"         // generic suffix
	SuffixWebP        = "@GOEDGE_WEBP"     // WebP variant suffix
	SuffixCompression = "@GOEDGE_"         // compression suffix: SuffixCompression + Encoding
	SuffixMethod      = "@GOEDGE_"         // request method suffix: SuffixMethod + RequestMethod
	SuffixPartial     = "@GOEDGE_partial"  // partial (range) cache suffix
)

View File

@@ -0,0 +1,61 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import "errors"
// Common cache errors.
var (
	ErrNotFound                = errors.New("cache not found")
	ErrFileIsWriting           = errors.New("the cache file is updating")
	ErrInvalidRange            = errors.New("invalid range")
	ErrEntityTooLarge          = errors.New("entity too large")
	ErrWritingUnavailable      = errors.New("writing unavailable")
	ErrWritingQueueFull        = errors.New("writing queue full")
	ErrServerIsBusy            = errors.New("server is busy")
	ErrUnexpectedContentLength = errors.New("unexpected content length")
)
// CapacityError signals that a cache store ran out of capacity.
// It is a dedicated type so that callers can detect it (via errors.As) and
// ignore it in contexts where flooding the logs would be counterproductive.
type CapacityError struct {
	message string
}

// NewCapacityError builds a CapacityError carrying the given message.
func NewCapacityError(err string) error {
	return &CapacityError{message: err}
}

// Error implements the error interface.
func (this *CapacityError) Error() string {
	return this.message
}
// CanIgnoreErr reports whether err is harmless enough to be ignored: nil,
// one of the well-known transient cache errors, or a capacity error.
func CanIgnoreErr(err error) bool {
	if err == nil {
		return true
	}
	for _, ignorable := range []error{
		ErrFileIsWriting,
		ErrEntityTooLarge,
		ErrWritingUnavailable,
		ErrWritingQueueFull,
		ErrServerIsBusy,
	} {
		if errors.Is(err, ignorable) {
			return true
		}
	}
	var capacityErr *CapacityError
	return errors.As(err, &capacityErr)
}
// IsCapacityError reports whether err is (or wraps) a *CapacityError.
func IsCapacityError(err error) bool {
	if err == nil {
		return false
	}
	var target *CapacityError
	return errors.As(err, &target)
}

// IsBusyError reports whether err is (or wraps) ErrServerIsBusy.
func IsBusyError(err error) bool {
	if err == nil {
		return false
	}
	return errors.Is(err, ErrServerIsBusy)
}

View File

@@ -0,0 +1,24 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"errors"
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestCanIgnoreErr verifies that CanIgnoreErr recognizes both the sentinel
// errors (directly and wrapped) and CapacityError values, while rejecting
// ErrNotFound and arbitrary errors.
func TestCanIgnoreErr(t *testing.T) {
	var a = assert.NewAssertion(t)
	a.IsTrue(caches.CanIgnoreErr(caches.ErrFileIsWriting))
	a.IsTrue(caches.CanIgnoreErr(fmt.Errorf("error: %w", caches.ErrFileIsWriting)))
	a.IsTrue(errors.Is(fmt.Errorf("error: %w", caches.ErrFileIsWriting), caches.ErrFileIsWriting))
	a.IsTrue(errors.Is(caches.ErrFileIsWriting, caches.ErrFileIsWriting))
	a.IsTrue(caches.CanIgnoreErr(caches.NewCapacityError("over capacity")))
	a.IsTrue(caches.CanIgnoreErr(fmt.Errorf("error: %w", caches.NewCapacityError("over capacity"))))
	a.IsFalse(caches.CanIgnoreErr(caches.ErrNotFound))
	a.IsFalse(caches.CanIgnoreErr(errors.New("test error")))
}

View File

@@ -0,0 +1,12 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import "github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
// FileDir describes one directory used for file-based cache storage.
type FileDir struct {
	Path     string                // directory path on disk
	Capacity *shared.SizeCapacity  // optional capacity limit
	IsFull   bool                  // whether the directory reached its capacity
	IsSSD    bool                  // optimization hint: directory lives on an SSD
}
View File

@@ -0,0 +1,8 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// HotItem records how often a cache key was hit, used for hot-item promotion.
type HotItem struct {
	Key  string // cache key
	Hits uint32 // number of recorded hits
}
View File

@@ -0,0 +1,59 @@
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"strings"
)
// ItemType distinguishes where a cache item is stored.
type ItemType = int

const (
	ItemTypeFile   ItemType = 1 // stored on disk
	ItemTypeMemory ItemType = 2 // stored in memory
)

// currentWeek computes the period number used for Item.Week.
// Do not use a Y/W calendar value here, because adjacency between two
// periods must be computable by simple subtraction.
// NOTE(review): despite the name, Unix()/86400 yields a DAY number, not a
// week number — confirm whether that granularity is intended.
func currentWeek() int32 {
	return int32(fasttime.Now().Unix() / 86400)
}
// Item is one cache entry's metadata. The short numeric JSON tags keep the
// serialized form compact; fields tagged "-" are runtime-only.
type Item struct {
	Type       ItemType `json:"-"`           // storage type (file/memory)
	Key        string   `json:"1,omitempty"` // full cache key (URL + optional suffix)
	ExpiresAt  int64    `json:"2,omitempty"` // unix time after which the item is expired
	StaleAt    int64    `json:"3,omitempty"` // unix time after which the stale copy is purged
	HeaderSize int64    `json:"-"`
	BodySize   int64    `json:"4,omitempty"`
	MetaSize   int64    `json:"-"`
	Host       string   `json:"-"`           // host name
	ServerId   int64    `json:"5,omitempty"` // server ID
	Week       int32    `json:"-"`           // period number, see currentWeek()
	CreatedAt  int64    `json:"6,omitempty"`
}
// IsExpired reports whether the item's expiration time has passed.
func (this *Item) IsExpired() bool {
	return this.ExpiresAt < fasttime.Now().Unix()
}

// TotalSize is the full accounted footprint: payload plus metadata plus the
// key and host strings.
func (this *Item) TotalSize() int64 {
	return this.Size() + this.MetaSize + int64(len(this.Key)) + int64(len(this.Host))
}

// Size is the payload size (header + body) only.
func (this *Item) Size() int64 {
	return this.HeaderSize + this.BodySize
}
// RequestURI extracts the path-and-query portion from the cache key (which
// is expected to look like "scheme://host/path?query"). It returns "" when
// the key has no scheme separator or no path after the host.
func (this *Item) RequestURI() string {
	const sep = "://"
	var schemeIndex = strings.Index(this.Key, sep)
	if schemeIndex <= 0 {
		return ""
	}
	var rest = this.Key[schemeIndex+len(sep):]
	var slashIndex = strings.Index(rest, "/")
	if slashIndex <= 0 {
		return ""
	}
	return rest[slashIndex:]
}

View File

@@ -0,0 +1,118 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"encoding/json"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
"runtime"
"testing"
"time"
)
// TestItem_Marshal prints the JSON form of an empty and a populated Item to
// inspect the compact numeric-tag encoding.
func TestItem_Marshal(t *testing.T) {
	{
		// zero value: all omitempty fields should vanish
		var item = &caches.Item{}
		data, err := json.Marshal(item)
		if err != nil {
			t.Fatal(err)
		}
		t.Log(string(data))
	}
	{
		var item = &caches.Item{
			Type:       caches.ItemTypeFile,
			Key:        "https://example.com/index.html",
			ExpiresAt:  fasttime.Now().Unix(),
			HeaderSize: 1 << 10,
			BodySize:   1 << 20,
			MetaSize:   256,
		}
		data, err := json.Marshal(item)
		if err != nil {
			t.Fatal(err)
		}
		t.Log(string(data))
	}
}
// TestItems_Memory measures the heap footprint of a large slice of *Item
// (10M items in single-test mode, 100 otherwise) via HeapInuse deltas.
func TestItems_Memory(t *testing.T) {
	var stat = &runtime.MemStats{}
	runtime.ReadMemStats(stat)
	var memory1 = stat.HeapInuse
	var items = []*caches.Item{}
	var count = 100
	if testutils.IsSingleTesting() {
		count = 10_000_000
	}
	for i := 0; i < count; i++ {
		items = append(items, &caches.Item{
			Key: types.String(i),
		})
	}
	runtime.ReadMemStats(stat)
	var memory2 = stat.HeapInuse
	t.Log(memory1, memory2, (memory2-memory1)/1024/1024, "M")
	runtime.ReadMemStats(stat)
	var memory3 = stat.HeapInuse
	t.Log(memory2, memory3, (memory3-memory2)/1024/1024, "M")
	if testutils.IsSingleTesting() {
		// keep the slice alive briefly so the measurement is observable
		time.Sleep(1 * time.Second)
	}
}
// TestItems_Memory2 measures the heap footprint of the week-bucketed
// map[int32]map[string]zero.Zero layout used for hash tracking, with keys
// spread over ~300 randomly chosen days.
func TestItems_Memory2(t *testing.T) {
	var stat = &runtime.MemStats{}
	runtime.ReadMemStats(stat)
	var memory1 = stat.HeapInuse
	var items = map[int32]map[string]zero.Zero{}
	var count = 100
	if testutils.IsSingleTesting() {
		count = 10_000_000
	}
	for i := 0; i < count; i++ {
		// pick a pseudo-random week bucket within roughly the last 300 days
		var week = int32((time.Now().Unix() - int64(86400*rands.Int(0, 300))) / (86400 * 7))
		m, ok := items[week]
		if !ok {
			m = map[string]zero.Zero{}
			items[week] = m
		}
		m[types.String(int64(i)*1_000_000)] = zero.New()
	}
	runtime.ReadMemStats(stat)
	var memory2 = stat.HeapInuse
	t.Log(memory1, memory2, (memory2-memory1)/1024/1024, "M")
	if testutils.IsSingleTesting() {
		time.Sleep(1 * time.Second)
	}
	for w, i := range items {
		t.Log(w, len(i))
	}
}
// TestItem_RequestURI prints RequestURI results for representative keys
// (plain, with port, with query string) for manual inspection.
func TestItem_RequestURI(t *testing.T) {
	for _, u := range []string{
		"https://goedge.cn/hello/world",
		"https://goedge.cn:8080/hello/world",
		"https://goedge.cn/hello/world?v=1&t=123",
	} {
		var item = &caches.Item{Key: u}
		t.Log(u, "=>", item.RequestURI())
	}
}

View File

@@ -0,0 +1,627 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"errors"
"fmt"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/types"
"net"
"net/url"
"os"
"runtime"
"strings"
"time"
)
// SQLiteFileListDB stores the file-cache item index in SQLite, with separate
// read and write connections plus an in-memory hash map for fast existence
// checks.
type SQLiteFileListDB struct {
	dbPath  string
	readDB  *dbs.DB // read-only connection pool
	writeDB *dbs.DB // single-connection writer (SQLite allows one writer)

	hashMap *SQLiteFileListHashMap // in-memory set of known hashes

	itemsTableName string

	isClosed        bool // whether the DB has been closed
	isReady         bool // whether initialization finished
	hashMapIsLoaded bool // whether the hash map has been loaded

	// prepared statements over cacheItems
	existsByHashStmt *dbs.Stmt // check existence by hash

	insertStmt *dbs.Stmt // insert one row
	insertSQL  string

	selectByHashStmt *dbs.Stmt // look up one row by hash

	selectHashListStmt *dbs.Stmt // page through (id, hash) pairs

	deleteByHashStmt *dbs.Stmt // delete by hash
	deleteByHashSQL  string

	statStmt           *dbs.Stmt // aggregate statistics
	purgeStmt          *dbs.Stmt // select stale hashes for purging
	deleteAllStmt      *dbs.Stmt // delete everything
	listOlderItemsStmt *dbs.Stmt // read the oldest stored entries
}

// NewSQLiteFileListDB creates an unopened DB wrapper with an empty hash map.
func NewSQLiteFileListDB() *SQLiteFileListDB {
	return &SQLiteFileListDB{
		hashMap: NewSQLiteFileListHashMap(),
	}
}
// Open opens the write and read SQLite handles for dbPath. The writer is
// limited to one connection (SQLite single-writer model); the reader may use
// one connection per CPU. On failure no handle is left open.
func (this *SQLiteFileListDB) Open(dbPath string) error {
	this.dbPath = dbPath

	// scale the SQLite page cache with available system memory
	var cacheSize = 512
	var memoryGB = memutils.SystemMemoryGB()
	if memoryGB >= 1 {
		cacheSize = 256 * memoryGB
	}

	// write db
	// do NOT take an EXCLUSIVE lock here, otherwise async transactions may fail
	writeDB, err := dbs.OpenWriter("file:" + dbPath + "?cache=private&mode=rwc&_journal_mode=WAL&_sync=" + dbs.SyncMode + "&_cache_size=" + types.String(cacheSize) + "&_secure_delete=FAST")
	if err != nil {
		return fmt.Errorf("open write database failed: %w", err)
	}
	writeDB.SetMaxOpenConns(1)
	this.writeDB = writeDB

	// TODO vacuuming takes too long, so the database is not compacted for now
	// TODO decide by row count whether VACUUM is worthwhile
	// TODO note that VACUUM can actually make the database file larger
	/**_, err = db.Exec("VACUUM")
	if err != nil {
		return err
	}**/

	// corruption check / index rebuild
	// only performed on explicit request (EdgeRecover env) because it is slow
	var recoverEnv, _ = os.LookupEnv("EdgeRecover")
	if len(recoverEnv) > 0 && this.shouldRecover() {
		for _, indexName := range []string{"staleAt", "hash"} {
			_, _ = this.writeDB.Exec(`REINDEX "` + indexName + `"`)
		}
	}

	if teaconst.EnableDBStat {
		this.writeDB.EnableStat(true)
	}

	// read db
	readDB, err := dbs.OpenReader("file:" + dbPath + "?cache=private&mode=ro&_journal_mode=WAL&_sync=OFF&_cache_size=" + types.String(cacheSize))
	if err != nil {
		// do not leak the already-opened write handle
		_ = writeDB.Close()
		this.writeDB = nil
		return fmt.Errorf("open read database failed: %w", err)
	}
	readDB.SetMaxOpenConns(runtime.NumCPU())
	this.readDB = readDB

	if teaconst.EnableDBStat {
		this.readDB.EnableStat(true)
	}

	return nil
}
// Init creates the cacheItems table and prepares all frequently used
// statements, then starts loading the hash map in the background. Must be
// called after Open; sets isReady on success.
func (this *SQLiteFileListDB) Init() error {
	this.itemsTableName = "cacheItems"

	// create tables
	var err = this.initTables(1)
	if err != nil {
		return fmt.Errorf("init tables failed: %w", err)
	}

	// frequently used statements
	this.existsByHashStmt, err = this.readDB.Prepare(`SELECT "expiredAt" FROM "` + this.itemsTableName + `" INDEXED BY "hash" WHERE "hash"=? AND expiredAt>? LIMIT 1`)
	if err != nil {
		return err
	}

	this.insertSQL = `INSERT INTO "` + this.itemsTableName + `" ("hash", "key", "headerSize", "bodySize", "metaSize", "expiredAt", "staleAt", "host", "serverId", "createdAt") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
	this.insertStmt, err = this.writeDB.Prepare(this.insertSQL)
	if err != nil {
		return err
	}

	this.selectByHashStmt, err = this.readDB.Prepare(`SELECT "key", "headerSize", "bodySize", "metaSize", "expiredAt" FROM "` + this.itemsTableName + `" WHERE "hash"=? LIMIT 1`)
	if err != nil {
		return err
	}

	this.selectHashListStmt, err = this.readDB.Prepare(`SELECT "id", "hash" FROM "` + this.itemsTableName + `" WHERE id>? ORDER BY id ASC LIMIT 2000`)
	if err != nil {
		return err
	}

	this.deleteByHashSQL = `DELETE FROM "` + this.itemsTableName + `" WHERE "hash"=?`
	this.deleteByHashStmt, err = this.writeDB.Prepare(this.deleteByHashSQL)
	if err != nil {
		return err
	}

	this.statStmt, err = this.readDB.Prepare(`SELECT COUNT(*), IFNULL(SUM(headerSize+bodySize+metaSize), 0), IFNULL(SUM(headerSize+bodySize), 0) FROM "` + this.itemsTableName + `"`)
	if err != nil {
		return err
	}

	this.purgeStmt, err = this.readDB.Prepare(`SELECT "hash" FROM "` + this.itemsTableName + `" WHERE staleAt<=? LIMIT ?`)
	if err != nil {
		return err
	}

	this.deleteAllStmt, err = this.writeDB.Prepare(`DELETE FROM "` + this.itemsTableName + `"`)
	if err != nil {
		return err
	}

	this.listOlderItemsStmt, err = this.readDB.Prepare(`SELECT "hash" FROM "` + this.itemsTableName + `" ORDER BY "id" ASC LIMIT ?`)
	if err != nil {
		return err
	}

	this.isReady = true

	// load the hash map asynchronously to avoid blocking startup
	go this.loadHashMap()

	return nil
}
// IsReady reports whether Init completed successfully.
func (this *SQLiteFileListDB) IsReady() bool {
	return this.isReady
}

// Total returns the total number of rows in the items table.
func (this *SQLiteFileListDB) Total() (int64, error) {
	// read the total row count
	var row = this.readDB.QueryRow(`SELECT COUNT(*) FROM "` + this.itemsTableName + `"`)
	if row.Err() != nil {
		return 0, row.Err()
	}
	var total int64
	err := row.Scan(&total)
	return total, err
}
// AddSync inserts one item synchronously. A zero StaleAt defaults to
// ExpiresAt. The hash is registered in the in-memory map BEFORE the insert;
// on insert failure the map may therefore briefly contain a hash that is
// absent from the database — presumably reconciled elsewhere, verify.
func (this *SQLiteFileListDB) AddSync(hash string, item *Item) error {
	this.hashMap.Add(hash)
	if item.StaleAt == 0 {
		item.StaleAt = item.ExpiresAt
	}
	_, err := this.insertStmt.Exec(hash, item.Key, item.HeaderSize, item.BodySize, item.MetaSize, item.ExpiresAt, item.StaleAt, item.Host, item.ServerId, fasttime.Now().Unix())
	if err != nil {
		return this.WrapError(err)
	}
	return nil
}

// DeleteSync removes one item (hash map first, then the database row).
func (this *SQLiteFileListDB) DeleteSync(hash string) error {
	this.hashMap.Delete(hash)
	_, err := this.deleteByHashStmt.Exec(hash)
	if err != nil {
		return err
	}
	return nil
}
// ListExpiredItems returns up to count hashes whose staleAt has passed.
// A non-positive count defaults to 100; before Init it returns (nil, nil).
func (this *SQLiteFileListDB) ListExpiredItems(count int) (hashList []string, err error) {
	if !this.isReady {
		return nil, nil
	}
	if count <= 0 {
		count = 100
	}
	rows, err := this.purgeStmt.Query(time.Now().Unix(), count)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()
	for rows.Next() {
		var hash string
		err = rows.Scan(&hash)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, hash)
	}
	// surface any iteration error that terminated rows.Next() early
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return hashList, nil
}
// ListLFUItems returns up to count hashes chosen for eviction: expired items
// first, topped up with the oldest stored items when fewer than count
// expired items exist. Before Init it returns (nil, nil).
func (this *SQLiteFileListDB) ListLFUItems(count int) (hashList []string, err error) {
	if !this.isReady {
		return nil, nil
	}
	if count <= 0 {
		count = 100
	}
	// expired items first
	hashList, err = this.ListExpiredItems(count)
	if err != nil {
		return
	}
	var l = len(hashList)
	// top up with the oldest cached entries
	if l < count {
		oldHashList, err := this.listOlderItems(count - l)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, oldHashList...)
	}
	return hashList, nil
}
// ListHashes returns a page of (id, hash) pairs starting after lastId,
// along with the largest id seen so the caller can continue paginating.
func (this *SQLiteFileListDB) ListHashes(lastId int64) (hashList []string, maxId int64, err error) {
	rows, err := this.selectHashListStmt.Query(lastId)
	if err != nil {
		return nil, 0, err
	}
	// a deferred close replaces the two manual Close calls of the original
	defer func() {
		_ = rows.Close()
	}()
	var id int64
	var hash string
	for rows.Next() {
		err = rows.Scan(&id, &hash)
		if err != nil {
			return
		}
		maxId = id
		hashList = append(hashList, hash)
	}
	// surface iteration errors; the original silently ignored them
	err = rows.Err()
	return
}
// IncreaseHitAsync is a no-op: this implementation does not track hit counters.
func (this *SQLiteFileListDB) IncreaseHitAsync(hash string) error {
	// do nothing
	return nil
}
// CleanPrefix marks every item whose key starts with 'prefix' as expired,
// in batches of 10000 rows, so the purge loop removes them later.
func (this *SQLiteFileListDB) CleanPrefix(prefix string) error {
	if !this.isReady {
		return nil
	}
	var count = int64(10000)
	var unixTime = fasttime.Now().Unix() // only clean existing items, keep items created after this moment
	for {
		// INSTR("key", ?)=1 selects keys beginning with the prefix; expiredAt is zeroed
		// and staleAt pushed forward so the stale copy can still be served for a while
		result, err := this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET expiredAt=0,staleAt=? WHERE id IN (SELECT id FROM "`+this.itemsTableName+`" WHERE expiredAt>0 AND createdAt<=? AND INSTR("key", ?)=1 LIMIT `+types.String(count)+`)`, unixTime+DefaultStaleCacheSeconds, unixTime, prefix)
		if err != nil {
			return this.WrapError(err)
		}
		affectedRows, err := result.RowsAffected()
		if err != nil {
			return err
		}
		// a batch smaller than the limit means everything has been processed
		if affectedRows < count {
			return nil
		}
	}
}
// CleanMatchKey marks items matching a wildcard key (e.g. "https://*.example.com/path")
// as expired. The host is matched with GLOB (one level only) and the path with LIKE.
func (this *SQLiteFileListDB) CleanMatchKey(key string) error {
	if !this.isReady {
		return nil
	}
	// ignore keys that already carry the @GOEDGE_ suffix marker
	if strings.Contains(key, SuffixAll) {
		return nil
	}
	u, err := url.Parse(key)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}
	// escape LIKE wildcards, then translate the first '*' into a LIKE '%'
	var queryKey = strings.ReplaceAll(key, "%", "\\%")
	queryKey = strings.ReplaceAll(queryKey, "_", "\\_")
	queryKey = strings.Replace(queryKey, "*", "%", 1)
	// TODO check performance of this operation on large data sets
	var unixTime = fasttime.Now().Unix() // only clean existing items, keep items created after this moment
	// first pass: the exact key pattern
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryKey)
	if err != nil {
		return err
	}
	// second pass: derived keys carrying the SuffixAll marker
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryKey+SuffixAll+"%")
	if err != nil {
		return err
	}
	return nil
}
// CleanMatchPrefix marks items matching a wildcard prefix (e.g. "https://*.example.com/dir")
// as expired, using the same GLOB host / LIKE key strategy as CleanMatchKey.
func (this *SQLiteFileListDB) CleanMatchPrefix(prefix string) error {
	if !this.isReady {
		return nil
	}
	u, err := url.Parse(prefix)
	if err != nil {
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}
	// escape LIKE wildcards, translate the first '*' into '%', then append '%' for prefix match
	var queryPrefix = strings.ReplaceAll(prefix, "%", "\\%")
	queryPrefix = strings.ReplaceAll(queryPrefix, "_", "\\_")
	queryPrefix = strings.Replace(queryPrefix, "*", "%", 1)
	queryPrefix += "%"
	// TODO check performance of this operation on large data sets
	var unixTime = fasttime.Now().Unix() // only clean existing items, keep items created after this moment
	_, err = this.writeDB.Exec(`UPDATE "`+this.itemsTableName+`" SET "expiredAt"=0, "staleAt"=? WHERE "host" GLOB ? AND "host" NOT GLOB ? AND "key" LIKE ? ESCAPE '\'`, unixTime+DefaultStaleCacheSeconds, host, "*."+host, queryPrefix)
	return err
}
// CleanAll removes every item record from the database and resets the hash map.
func (this *SQLiteFileListDB) CleanAll() error {
	if !this.isReady {
		return nil
	}
	if _, err := this.deleteAllStmt.Exec(); err != nil {
		return this.WrapError(err)
	}
	this.hashMap.Clean()
	return nil
}
// Close releases all prepared statements and both database handles.
// It is idempotent: subsequent calls return nil immediately.
func (this *SQLiteFileListDB) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true
	this.isReady = false
	// statement close errors are ignored: the connections are closed below anyway
	if this.existsByHashStmt != nil {
		_ = this.existsByHashStmt.Close()
	}
	if this.insertStmt != nil {
		_ = this.insertStmt.Close()
	}
	if this.selectByHashStmt != nil {
		_ = this.selectByHashStmt.Close()
	}
	if this.selectHashListStmt != nil {
		_ = this.selectHashListStmt.Close()
	}
	if this.deleteByHashStmt != nil {
		_ = this.deleteByHashStmt.Close()
	}
	if this.statStmt != nil {
		_ = this.statStmt.Close()
	}
	if this.purgeStmt != nil {
		_ = this.purgeStmt.Close()
	}
	if this.deleteAllStmt != nil {
		_ = this.deleteAllStmt.Close()
	}
	if this.listOlderItemsStmt != nil {
		_ = this.listOlderItemsStmt.Close()
	}
	// both databases are closed even if one fails; their errors are joined into one
	var errStrings []string
	if this.readDB != nil {
		err := this.readDB.Close()
		if err != nil {
			errStrings = append(errStrings, err.Error())
		}
	}
	if this.writeDB != nil {
		err := this.writeDB.Close()
		if err != nil {
			errStrings = append(errStrings, err.Error())
		}
	}
	if len(errStrings) == 0 {
		return nil
	}
	return errors.New("close database failed: " + strings.Join(errStrings, ", "))
}
// WrapError annotates an error with the database file path for troubleshooting.
// It returns nil when given nil.
func (this *SQLiteFileListDB) WrapError(err error) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%w (file: %s)", err, this.dbPath)
}
// HashMapIsLoaded reports whether the background hash map load has completed.
func (this *SQLiteFileListDB) HashMapIsLoaded() bool {
	return this.hashMapIsLoaded
}
// initTables creates the items table and its indexes, dropping and recreating
// the table on failure (up to 3 attempts via the 'times' recursion counter).
func (this *SQLiteFileListDB) initTables(times int) error {
	{
		// expiredAt - expiration timestamp, used to decide whether an item has expired
		// staleAt - latest time a stale copy may live, used when purging the cache
		// the hash column deliberately has no UNIQUE constraint, to reduce the
		// chance of producing "malformed" database errors
		_, err := this.writeDB.Exec(`CREATE TABLE IF NOT EXISTS "` + this.itemsTableName + `" (
"id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
"hash" varchar(32),
"key" varchar(1024),
"tag" varchar(64),
"headerSize" integer DEFAULT 0,
"bodySize" integer DEFAULT 0,
"metaSize" integer DEFAULT 0,
"expiredAt" integer DEFAULT 0,
"staleAt" integer DEFAULT 0,
"createdAt" integer DEFAULT 0,
"host" varchar(128),
"serverId" integer
);
DROP INDEX IF EXISTS "createdAt";
DROP INDEX IF EXISTS "expiredAt";
DROP INDEX IF EXISTS "serverId";
CREATE INDEX IF NOT EXISTS "staleAt"
ON "` + this.itemsTableName + `" (
"staleAt" ASC
);
CREATE INDEX IF NOT EXISTS "hash"
ON "` + this.itemsTableName + `" (
"hash" ASC
);
`)
		if err != nil {
			// ignore expected errors
			if strings.Contains(err.Error(), "duplicate column name") {
				err = nil
			}
			// try to drop and recreate the table
			if err != nil {
				if times < 3 {
					_, dropErr := this.writeDB.Exec(`DROP TABLE "` + this.itemsTableName + `"`)
					if dropErr == nil {
						return this.initTables(times + 1)
					}
					return this.WrapError(err)
				}
				return this.WrapError(err)
			}
		}
	}
	// drop the legacy hits table
	{
		_, _ = this.writeDB.Exec(`DROP TABLE "hits"`)
	}
	return nil
}
// listOlderItems returns up to 'count' hashes of the oldest items (lowest ids),
// used to top up LFU eviction candidates.
func (this *SQLiteFileListDB) listOlderItems(count int) (hashList []string, err error) {
	rows, err := this.listOlderItemsStmt.Query(count)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = rows.Close()
	}()
	for rows.Next() {
		var hash string
		err = rows.Scan(&hash)
		if err != nil {
			return nil, err
		}
		hashList = append(hashList, hash)
	}
	// surface iteration errors; the original silently ignored them
	return hashList, rows.Err()
}
// shouldRecover runs SQLite's integrity check and reports whether the database
// file is corrupted and must be recreated (anything other than an "ok" row).
func (this *SQLiteFileListDB) shouldRecover() bool {
	result, err := this.writeDB.Query("pragma integrity_check;")
	if err != nil {
		// log the error and bail out: the original logged the nil result and
		// then called result.Next() on it, which panicked with a nil dereference
		logs.Println(err)
		return false
	}
	var errString = ""
	var shouldRecover = false
	if result.Next() {
		_ = result.Scan(&errString)
		if strings.TrimSpace(errString) != "ok" {
			shouldRecover = true
		}
	}
	_ = result.Close()
	return shouldRecover
}
// deleteDB removes the database file together with its SQLite WAL sidecar files.
func (this *SQLiteFileListDB) deleteDB() {
	for _, path := range []string{this.dbPath, this.dbPath + "-shm", this.dbPath + "-wal"} {
		_ = fsutils.Remove(path)
	}
}
// loadHashMap loads the hash list into memory in the background, recreating
// the database from scratch when it turns out to be closed or corrupted.
func (this *SQLiteFileListDB) loadHashMap() {
	this.hashMapIsLoaded = false
	err := this.hashMap.Load(this)
	if err != nil {
		remotelogs.Error("LIST_FILE_DB", "load hash map failed: "+err.Error()+"(file: "+this.dbPath+")")
		// auto-repair on fatal database errors
		// TODO try to recover as much of the previous database content as possible
		if strings.Contains(err.Error(), "database is closed") || strings.Contains(err.Error(), "database disk image is malformed") {
			_ = this.Close()
			this.deleteDB()
			remotelogs.Println("LIST_FILE_DB", "recreating the database (file:"+this.dbPath+") ...")
			err = this.Open(this.dbPath)
			if err != nil {
				remotelogs.Error("LIST_FILE_DB", "recreate the database failed: "+err.Error()+" (file:"+this.dbPath+")")
			} else {
				_ = this.Init()
			}
		}
		return
	}
	this.hashMapIsLoaded = true
}

View File

@@ -0,0 +1,170 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
"runtime"
"runtime/debug"
"testing"
"time"
)
// TestFileListDB_ListLFUItems exercises LFU candidate listing against a local
// sample database; it only runs in single-test mode.
func TestFileListDB_ListLFUItems(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()
	err := db.Open(Tea.Root + "/data/cache-db-large.db")
	//err := db.Open(Tea.Root + "/data/cache-index/p1/db-0.db")
	if err != nil {
		t.Fatal(err)
	}
	err = db.Init()
	if err != nil {
		t.Fatal(err)
	}
	hashList, err := db.ListLFUItems(100)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("[", len(hashList), "]", hashList)
}
// TestFileListDB_CleanMatchKey exercises wildcard key cleaning, with and
// without a port and LIKE special characters; it only runs in single-test mode.
func TestFileListDB_CleanMatchKey(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()
	err := db.Open(Tea.Root + "/data/cache-db-large.db")
	if err != nil {
		t.Fatal(err)
	}
	err = db.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = db.CleanMatchKey("https://*.goedge.cn/large-text")
	if err != nil {
		t.Fatal(err)
	}
	err = db.CleanMatchKey("https://*.goedge.cn:1234/large-text?%2B____")
	if err != nil {
		t.Fatal(err)
	}
}
// TestFileListDB_CleanMatchPrefix exercises wildcard prefix cleaning; it only
// runs in single-test mode.
func TestFileListDB_CleanMatchPrefix(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()
	err := db.Open(Tea.Root + "/data/cache-db-large.db")
	if err != nil {
		t.Fatal(err)
	}
	err = db.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = db.CleanMatchPrefix("https://*.goedge.cn/large-text")
	if err != nil {
		t.Fatal(err)
	}
	err = db.CleanMatchPrefix("https://*.goedge.cn:1234/large-text?%2B____")
	if err != nil {
		t.Fatal(err)
	}
}
// TestFileListDB_Memory is a manual memory-profiling helper: it loads all
// hashes and repeatedly lists LFU items, then sleeps so memory can be inspected.
// It only runs in single-test mode.
func TestFileListDB_Memory(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var db = caches.NewSQLiteFileListDB()
	defer func() {
		_ = db.Close()
	}()
	err := db.Open(Tea.Root + "/data/cache-index/p1/db-0.db")
	if err != nil {
		t.Fatal(err)
	}
	err = db.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(db.Total())
	// load hashes
	var maxId int64
	var hashList []string
	var before = time.Now()
	for i := 0; i < 1_000; i++ {
		hashList, maxId, err = db.ListHashes(maxId)
		if err != nil {
			t.Fatal(err)
		}
		if len(hashList) == 0 {
			t.Log("hashes loaded", time.Since(before).Seconds()*1000, "ms")
			break
		}
		if i%100 == 0 {
			t.Log(i)
		}
	}
	runtime.GC()
	debug.FreeOSMemory()
	//time.Sleep(600 * time.Second)
	for i := 0; i < 1_000; i++ {
		_, err = db.ListLFUItems(5000)
		if err != nil {
			t.Fatal(err)
		}
		if i%100 == 0 {
			t.Log(i)
		}
	}
	t.Log("loaded")
	runtime.GC()
	debug.FreeOSMemory()
	time.Sleep(600 * time.Second)
}

View File

@@ -0,0 +1,183 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"math/big"
"sync"
)
// HashMapSharding is the number of lock-sharded sub-maps; a prime spreads hashes evenly.
const HashMapSharding = 31

// bigIntPool recycles big.Int values used for hex-hash parsing to avoid per-call allocations.
var bigIntPool = sync.Pool{
	New: func() any {
		return big.NewInt(0)
	},
}
// SQLiteFileListHashMap is an in-memory, sharded set of file hashes used to
// answer existence checks without hitting the SQLite database.
type SQLiteFileListHashMap struct {
	m []map[uint64]zero.Zero // one hash set per shard
	lockers []*sync.RWMutex // one lock per shard

	isAvailable bool // false when the host has too little memory for caching
	isReady bool // true once all hashes have been loaded from the database
}
// NewSQLiteFileListHashMap creates a hash map with all shards and locks initialized.
// The map starts neither available nor ready; Load enables it.
func NewSQLiteFileListHashMap() *SQLiteFileListHashMap {
	var hashMap = &SQLiteFileListHashMap{
		m:           make([]map[uint64]zero.Zero, HashMapSharding),
		lockers:     make([]*sync.RWMutex, HashMapSharding),
		isAvailable: false,
		isReady:     false,
	}
	for i := range hashMap.m {
		hashMap.m[i] = map[uint64]zero.Zero{}
		hashMap.lockers[i] = &sync.RWMutex{}
	}
	return hashMap
}
// Load populates the map with every hash stored in the given database,
// paginating via ListHashes. Hosts with less than 3 GB of memory skip caching
// entirely, leaving the map unavailable.
func (this *SQLiteFileListHashMap) Load(db *SQLiteFileListDB) error {
	// skip caching when system memory is too small
	if memutils.SystemMemoryGB() < 3 {
		return nil
	}
	this.isAvailable = true
	var lastId int64
	// safety bound against an endless pagination loop
	var maxLoops = 50_000
	for {
		hashList, maxId, err := db.ListHashes(lastId)
		if err != nil {
			return err
		}
		if len(hashList) == 0 {
			break
		}
		this.AddHashes(hashList)
		lastId = maxId
		maxLoops--
		if maxLoops <= 0 {
			break
		}
	}
	this.isReady = true
	return nil
}
// Add registers a single hash in its shard; a no-op while the map is unavailable.
func (this *SQLiteFileListHashMap) Add(hash string) {
	if !this.isAvailable {
		return
	}
	var hashInt, index = this.bigInt(hash)
	var locker = this.lockers[index]
	locker.Lock()
	this.m[index][hashInt] = zero.New()
	locker.Unlock()
}
// AddHashes registers a batch of hashes; a no-op while the map is unavailable.
func (this *SQLiteFileListHashMap) AddHashes(hashes []string) {
	if !this.isAvailable {
		return
	}
	for _, hash := range hashes {
		var hashInt, index = this.bigInt(hash)
		var locker = this.lockers[index]
		locker.Lock()
		this.m[index][hashInt] = zero.New()
		locker.Unlock()
	}
}
// Delete removes a hash from its shard; a no-op while the map is unavailable.
func (this *SQLiteFileListHashMap) Delete(hash string) {
	if !this.isAvailable {
		return
	}
	var hashInt, index = this.bigInt(hash)
	var locker = this.lockers[index]
	locker.Lock()
	delete(this.m[index], hashInt)
	locker.Unlock()
}
// Exist reports whether a hash is present. It errs on the side of 'true' while
// the map is unavailable or still loading, so callers fall back to the database.
func (this *SQLiteFileListHashMap) Exist(hash string) bool {
	if !this.isAvailable {
		return true
	}
	if !this.isReady {
		// only a fully loaded map can reliably answer 'false'
		return true
	}
	hashInt, index := this.bigInt(hash)
	this.lockers[index].RLock()
	_, ok := this.m[index][hashInt]
	this.lockers[index].RUnlock()
	return ok
}
// Clean empties every shard while holding all shard locks.
func (this *SQLiteFileListHashMap) Clean() {
	for i := 0; i < HashMapSharding; i++ {
		this.lockers[i].Lock()
	}
	// allocate fresh maps instead of clearing this.m in place, so writers that
	// were blocked on the locks never write into a map being torn down (panic risk)
	for i := 0; i < HashMapSharding; i++ {
		this.m[i] = map[uint64]zero.Zero{}
	}
	for i := HashMapSharding - 1; i >= 0; i-- {
		this.lockers[i].Unlock()
	}
}
// IsReady reports whether the map has been fully loaded from the database.
func (this *SQLiteFileListHashMap) IsReady() bool {
	return this.isReady
}
// Len returns the total number of hashes across all shards.
// All shard locks are held simultaneously so the count is a consistent snapshot;
// read locks are sufficient (the original took write locks, blocking readers).
func (this *SQLiteFileListHashMap) Len() int {
	for i := 0; i < HashMapSharding; i++ {
		this.lockers[i].RLock()
	}
	var count = 0
	for _, shard := range this.m {
		count += len(shard)
	}
	for i := HashMapSharding - 1; i >= 0; i-- {
		this.lockers[i].RUnlock()
	}
	return count
}
// SetIsAvailable toggles whether the map accepts writes and lookups (used by tests).
func (this *SQLiteFileListHashMap) SetIsAvailable(isAvailable bool) {
	this.isAvailable = isAvailable
}
// SetIsReady marks the map as fully loaded (used by tests).
func (this *SQLiteFileListHashMap) SetIsReady(isReady bool) {
	this.isReady = isReady
}
// bigInt parses the hex hash and keeps its low 64 bits as the set key, also
// deriving the shard index from it. A pooled big.Int avoids a per-call allocation.
func (this *SQLiteFileListHashMap) bigInt(hash string) (hashInt uint64, index int) {
	var b = bigIntPool.Get().(*big.Int)
	b.SetString(hash, 16)
	hashInt = b.Uint64()
	bigIntPool.Put(b)
	return hashInt, int(hashInt % HashMapSharding)
}

View File

@@ -0,0 +1,169 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/assert"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/big"
"runtime"
"strconv"
"testing"
"time"
)
// TestFileListHashMap_Memory measures the memory footprint of one million hashes.
func TestFileListHashMap_Memory(t *testing.T) {
	var stat1 = &runtime.MemStats{}
	runtime.ReadMemStats(stat1)
	var m = caches.NewSQLiteFileListHashMap()
	m.SetIsAvailable(true)
	for i := 0; i < 1_000_000; i++ {
		m.Add(stringutil.Md5(types.String(i)))
	}
	t.Log("added:", m.Len(), "hashes")
	var stat2 = &runtime.MemStats{}
	runtime.ReadMemStats(stat2)
	t.Log("ready", (stat2.Alloc-stat1.Alloc)/1024/1024, "M")
	t.Log("remains:", m.Len(), "hashes")
}
// TestFileListHashMap_Memory2 is a baseline: footprint of a plain uint64 set.
func TestFileListHashMap_Memory2(t *testing.T) {
	var stat1 = &runtime.MemStats{}
	runtime.ReadMemStats(stat1)
	var m = map[uint64]zero.Zero{}
	for i := 0; i < 1_000_000; i++ {
		m[uint64(i)] = zero.New()
	}
	var stat2 = &runtime.MemStats{}
	runtime.ReadMemStats(stat2)
	t.Log("ready", (stat2.Alloc-stat1.Alloc)/1024/1024, "M")
}
// TestFileListHashMap_BigInt verifies that reusing one big.Int for hex parsing
// yields the same uint64 as a freshly allocated one.
func TestFileListHashMap_BigInt(t *testing.T) {
	var bigInt = big.NewInt(0)
	for _, s := range []string{"1", "2", "3", "123", "123456"} {
		var hash = stringutil.Md5(s)
		var bigInt1 = big.NewInt(0)
		bigInt1.SetString(hash, 16)
		bigInt.SetString(hash, 16)
		t.Log(s, "=>", bigInt1.Uint64(), "hash:", hash, "format:", strconv.FormatUint(bigInt1.Uint64(), 16), strconv.FormatUint(bigInt.Uint64(), 16))
		if strconv.FormatUint(bigInt1.Uint64(), 16) != strconv.FormatUint(bigInt.Uint64(), 16) {
			t.Fatal("not equal")
		}
	}
	for i := 0; i < 1_000_000; i++ {
		var hash = stringutil.Md5(types.String(i))
		var bigInt1 = big.NewInt(0)
		bigInt1.SetString(hash, 16)
		bigInt.SetString(hash, 16)
		if bigInt1.Uint64() != bigInt.Uint64() {
			t.Fatal(i, "not equal")
		}
	}
}
// TestFileListHashMap_Load loads the map from a local sample database and
// spot-checks a few hashes; it only runs in single-test mode.
func TestFileListHashMap_Load(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	var m = caches.NewSQLiteFileListHashMap()
	var before = time.Now()
	var db = list.GetDB("abc")
	err = m.Load(db)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log("count:", m.Len())
	m.Add("abc")
	for _, hash := range []string{"33347bb4441265405347816cad36a0f8", "a", "abc", "123"} {
		t.Log(hash, "=>", m.Exist(hash))
	}
}
// TestFileListHashMap_Delete verifies add/delete round-trips via Len.
func TestFileListHashMap_Delete(t *testing.T) {
	var a = assert.NewAssertion(t)
	var m = caches.NewSQLiteFileListHashMap()
	m.SetIsReady(true)
	m.SetIsAvailable(true)
	m.Add("a")
	a.IsTrue(m.Len() == 1)
	m.Delete("a")
	a.IsTrue(m.Len() == 0)
}
// TestFileListHashMap_Clean verifies that the map remains writable after Clean.
func TestFileListHashMap_Clean(t *testing.T) {
	var m = caches.NewSQLiteFileListHashMap()
	m.SetIsAvailable(true)
	m.Clean()
	m.Add("a")
}
// Benchmark_BigInt measures hex-to-uint64 conversion cost with a fresh big.Int per call.
func Benchmark_BigInt(b *testing.B) {
	var hash = stringutil.Md5("123456")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var bigInt = big.NewInt(0)
		bigInt.SetString(hash, 16)
		_ = bigInt.Uint64()
	}
}
// BenchmarkFileListHashMap_Exist measures concurrent mixed Add/Exist throughput.
func BenchmarkFileListHashMap_Exist(b *testing.B) {
	var m = caches.NewSQLiteFileListHashMap()
	m.SetIsAvailable(true)
	m.SetIsReady(true)
	for i := 0; i < 1_000_000; i++ {
		m.Add(types.String(i))
	}
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			m.Add(types.String(rands.Int64()))
			_ = m.Exist(types.String(rands.Int64()))
		}
	})
}

View File

@@ -0,0 +1,350 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/iwind/TeaGo/types"
"strings"
"testing"
)
// countKVStores is the number of shard stores a list is split into.
const countKVStores = 10

// KVFileList is a cache item list backed by several KV store shards,
// fronted by a shared in-memory (hash => size) TTL cache.
type KVFileList struct {
	dir string // base directory holding the store files
	stores [countKVStores]*KVListFileStore // shard stores, selected by key hash
	onAdd func(item *Item) // fired after an item is added
	onRemove func(item *Item) // fired after an item is removed
	memCache *ttlcache.Cache[int64] // hash => header+body size, avoids store lookups
}
// NewKVFileList creates a list rooted at dir with one shard store per slot and
// a shared memory cache sized proportionally to system memory.
func NewKVFileList(dir string) *KVFileList {
	var memGB = memutils.SystemMemoryGB()
	if memGB <= 0 {
		memGB = 1
	}
	var maxCachePieces = 32
	var maxCacheItems = memGB << 15 // 32K cache entries per GB of system memory
	var memCache = ttlcache.NewCache[int64](ttlcache.NewPiecesOption(maxCachePieces), ttlcache.NewMaxItemsOption(maxCacheItems))
	dir = strings.TrimSuffix(dir, "/")
	var stores = [countKVStores]*KVListFileStore{}
	for i := 0; i < countKVStores; i++ {
		stores[i] = NewKVListFileStore(dir+"/db-"+types.String(i)+".store", memCache)
	}
	return &KVFileList{
		dir: dir,
		stores: stores,
		memCache: memCache,
	}
}
// Init opens all shard stores concurrently and returns the last open error, if any.
func (this *KVFileList) Init() error {
	remotelogs.Println("CACHE", "loading database from '"+this.dir+"' ...")
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.Open()
			if err != nil {
				// guard lastErr with the group lock: tasks run concurrently,
				// and an unsynchronized write here is a data race
				group.Lock()
				lastErr = fmt.Errorf("open store '"+storeCopy.Path()+"' failed: %w", err)
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// Reset clears the memory cache; the on-disk stores are left untouched.
func (this *KVFileList) Reset() error {
	this.memCache.Clean()
	return nil
}
// Add stores an item under its hash, fires the onAdd callback, and caches the
// item size in memory.
func (this *KVFileList) Add(hash string, item *Item) error {
	err := this.getStore(hash).AddItem(hash, item)
	if err != nil {
		return err
	}
	if this.onAdd != nil {
		this.onAdd(item)
	}
	if item.ExpiresAt > 0 {
		// cache for at most one hour to bound staleness of the size entry
		this.memCache.Write(hash, item.HeaderSize+item.BodySize, min(item.ExpiresAt, fasttime.Now().Unix()+3600))
	}
	return nil
}
// Exist reports whether an item exists, returning its total size when known.
func (this *KVFileList) Exist(hash string) (bool, int64, error) {
	// serve from the memory cache first
	if cacheItem := this.memCache.Read(hash); cacheItem != nil {
		return true, cacheItem.Value, nil
	}
	return this.getStore(hash).ExistItem(hash)
}
// ExistQuick reports existence without reading the item value.
func (this *KVFileList) ExistQuick(hash string) (bool, error) {
	// a memory-cache hit is authoritative enough for the quick path
	if this.memCache.Read(hash) != nil {
		return true, nil
	}
	return this.getStore(hash).ExistQuickItem(hash)
}
// CleanPrefix expires all items whose key starts with prefix, across all stores.
func (this *KVFileList) CleanPrefix(prefix string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithPrefix(prefix)
			if err != nil {
				// guard lastErr against concurrent writes (data race in the original)
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// CleanMatchKey expires all items matching a wildcard key, across all stores.
func (this *KVFileList) CleanMatchKey(key string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithWildcardKey(key)
			if err != nil {
				// guard lastErr against concurrent writes (data race in the original)
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// CleanMatchPrefix expires all items matching a wildcard prefix, across all stores.
func (this *KVFileList) CleanMatchPrefix(prefix string) error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.CleanItemsWithWildcardPrefix(prefix)
			if err != nil {
				// guard lastErr against concurrent writes (data race in the original)
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	return lastErr
}
// Remove deletes an item by hash from its store, fires the onRemove callback,
// and drops the cached size entry.
func (this *KVFileList) Remove(hash string) error {
	err := this.getStore(hash).RemoveItem(hash)
	if err != nil {
		return err
	}
	if this.onRemove != nil {
		// when remove file item, no any extra information needed
		this.onRemove(nil)
	}
	// remove from cache
	this.memCache.Delete(hash)
	return nil
}
// Purge removes up to 'count' stale items spread evenly over all stores,
// invoking callback for each removed hash. It returns how many were found
// and the last error encountered (stores are processed sequentially).
func (this *KVFileList) Purge(count int, callback func(hash string) error) (int, error) {
	count /= countKVStores
	if count <= 0 {
		count = 100
	}
	var countFound = 0
	var lastErr error
	for _, store := range this.stores {
		purgeCount, err := store.PurgeItems(count, callback)
		countFound += purgeCount
		if err != nil {
			lastErr = err
		}
	}
	return countFound, lastErr
}
// PurgeLFU removes roughly 'count' of the oldest items, spread over all stores,
// invoking callback for each removed hash.
func (this *KVFileList) PurgeLFU(count int, callback func(hash string) error) error {
	perStore := count / countKVStores
	if perStore <= 0 {
		perStore = 100
	}
	var lastErr error
	for _, store := range this.stores {
		if err := store.PurgeLFUItems(perStore, callback); err != nil {
			lastErr = err
		}
	}
	return lastErr
}
// CleanAll truncates every store concurrently and clears the memory cache.
func (this *KVFileList) CleanAll() error {
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.RemoveAllItems()
			if err != nil {
				// guard lastErr against concurrent writes (data race in the original)
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	this.memCache.Clean()
	return lastErr
}
// Stat aggregates size/count statistics over all stores concurrently.
// The 'check' callback is currently unused by the store layer here.
func (this *KVFileList) Stat(check func(hash string) bool) (*Stat, error) {
	var stat = &Stat{}
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			storeStat, err := storeCopy.StatItems()
			if err != nil {
				// lastErr is shared between tasks: protect it just like 'stat'
				group.Lock()
				lastErr = err
				group.Unlock()
				return
			}
			group.Lock()
			stat.Size += storeStat.Size
			stat.ValueSize += storeStat.ValueSize
			stat.Count += storeStat.Count
			group.Unlock()
		})
	}
	group.Wait()
	return stat, lastErr
}
// Count sums item counts over all stores concurrently.
func (this *KVFileList) Count() (int64, error) {
	var count int64
	var group = goman.NewTaskGroup()
	var lastErr error
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			countStoreItems, err := storeCopy.CountItems()
			if err != nil {
				// lastErr is shared between tasks: protect it just like 'count'
				group.Lock()
				lastErr = err
				group.Unlock()
				return
			}
			group.Lock()
			count += countStoreItems
			group.Unlock()
		})
	}
	group.Wait()
	return count, lastErr
}
// OnAdd registers the callback fired after each successful Add.
func (this *KVFileList) OnAdd(fn func(item *Item)) {
	this.onAdd = fn
}
// OnRemove registers the callback fired after each successful Remove.
func (this *KVFileList) OnRemove(fn func(item *Item)) {
	this.onRemove = fn
}
// Close shuts down all stores concurrently and destroys the memory cache.
func (this *KVFileList) Close() error {
	var lastErr error
	var group = goman.NewTaskGroup()
	for _, store := range this.stores {
		var storeCopy = store
		group.Run(func() {
			err := storeCopy.Close()
			if err != nil {
				// guard lastErr against concurrent writes (data race in the original)
				group.Lock()
				lastErr = err
				group.Unlock()
			}
		})
	}
	group.Wait()
	this.memCache.Destroy()
	return lastErr
}
// IncreaseHit is a no-op: this implementation does not track hit counters.
func (this *KVFileList) IncreaseHit(hash string) error {
	// do nothing
	return nil
}
// TestInspect dumps the contents of every store; intended for tests only.
func (this *KVFileList) TestInspect(t *testing.T) error {
	for _, store := range this.stores {
		err := store.TestInspect(t)
		if err != nil {
			return err
		}
	}
	return nil
}
// getStore selects the shard store responsible for a hash via FNV hashing.
func (this *KVFileList) getStore(hash string) *KVListFileStore {
	return this.stores[fnv.HashString(hash)%countKVStores]
}

View File

@@ -0,0 +1,66 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"encoding/binary"
"encoding/json"
"strings"
)
// ItemKVEncoder serializes cache Items for the KV store: whole items as JSON,
// individual indexed fields as sortable byte strings.
type ItemKVEncoder[T interface{ *Item }] struct {
}

// NewItemKVEncoder creates a new item encoder.
func NewItemKVEncoder[T interface{ *Item }]() *ItemKVEncoder[T] {
	return &ItemKVEncoder[T]{}
}

// Encode serializes the whole item as JSON.
func (this *ItemKVEncoder[T]) Encode(value T) ([]byte, error) {
	return json.Marshal(value)
}
// EncodeField serializes a single indexed field into a byte form whose byte
// order matches the field's sort order. Unknown field names yield (nil, nil).
// NOTE(review): timestamps and serverId are truncated to uint32 — values past
// 2106 or above 32 bits would wrap; confirm this is acceptable for the index.
func (this *ItemKVEncoder[T]) EncodeField(value T, fieldName string) ([]byte, error) {
	switch fieldName {
	case "createdAt":
		var b = make([]byte, 4)
		var createdAt = any(value).(*Item).CreatedAt
		binary.BigEndian.PutUint32(b, uint32(createdAt))
		return b, nil
	case "staleAt":
		var b = make([]byte, 4)
		var staleAt = any(value).(*Item).StaleAt
		if staleAt < 0 {
			staleAt = 0
		}
		binary.BigEndian.PutUint32(b, uint32(staleAt))
		return b, nil
	case "serverId":
		var b = make([]byte, 4)
		var serverId = any(value).(*Item).ServerId
		if serverId < 0 {
			serverId = 0
		}
		binary.BigEndian.PutUint32(b, uint32(serverId))
		return b, nil
	case "key":
		return []byte(any(value).(*Item).Key), nil
	case "wildKey":
		// replace the first host label (everything between the last '/' before
		// the first '.' and that '.') with '*', so wildcard lookups can match it
		var key = any(value).(*Item).Key
		var dotIndex = strings.Index(key, ".")
		if dotIndex > 0 {
			var slashIndex = strings.LastIndex(key[:dotIndex], "/")
			if slashIndex > 0 {
				key = key[:dotIndex][:slashIndex+1] + "*" + key[dotIndex:]
			}
		}
		return []byte(key), nil
	}
	return nil, nil
}
// Decode deserializes a JSON-encoded item produced by Encode.
func (this *ItemKVEncoder[T]) Decode(valueBytes []byte) (value T, err error) {
	err = json.Unmarshal(valueBytes, &value)
	return
}

View File

@@ -0,0 +1,69 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestItemKVEncoder_EncodeField checks the "key" passthrough and the "wildKey"
// first-host-label-to-'*' transformation on several representative keys.
func TestItemKVEncoder_EncodeField(t *testing.T) {
	var a = assert.NewAssertion(t)
	var encoder = caches.NewItemKVEncoder[*caches.Item]()
	{
		key, err := encoder.EncodeField(&caches.Item{
			Key: "https://example.com/index.html",
		}, "key")
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(key))
		a.IsTrue(string(key) == "https://example.com/index.html")
	}
	{
		key, err := encoder.EncodeField(&caches.Item{
			Key: "",
		}, "wildKey")
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(key))
		a.IsTrue(string(key) == "")
	}
	{
		key, err := encoder.EncodeField(&caches.Item{
			Key: "example.com/index.html",
		}, "wildKey")
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(key))
		a.IsTrue(string(key) == "example.com/index.html")
	}
	{
		key, err := encoder.EncodeField(&caches.Item{
			Key: "https://example.com/index.html",
		}, "wildKey")
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(key))
		a.IsTrue(string(key) == "https://*.com/index.html")
	}
	{
		key, err := encoder.EncodeField(&caches.Item{
			Key: "https://www.example.com/index.html",
		}, "wildKey")
		if err != nil {
			t.Fatal(err)
		}
		t.Log("key:", string(key))
		a.IsTrue(string(key) == "https://*.example.com/index.html")
	}
}

View File

@@ -0,0 +1,503 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
"errors"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/kvstore"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/cockroachdb/pebble"
"regexp"
"strings"
"testing"
)
// KVListFileStore is a single shard of the KV-backed cache item list,
// persisting items in one pebble-based store file.
type KVListFileStore struct {
	path string // store file path, must end with ".store"
	rawStore *kvstore.Store // underlying KV store

	// tables
	itemsTable *kvstore.Table[*Item] // hash => item, with secondary field indexes

	rawIsReady bool // true once Open has succeeded

	memCache *ttlcache.Cache[int64] // shared (hash => size) cache owned by the list
}
// NewKVListFileStore creates a store for the given path; call Open before use.
func NewKVListFileStore(path string, memCache *ttlcache.Cache[int64]) *KVListFileStore {
	return &KVListFileStore{
		path: path,
		memCache: memCache,
	}
}
// Open parses the store path into directory and name, opens the underlying KV
// store, and prepares the items table with its secondary field indexes.
func (this *KVListFileStore) Open() error {
	// the path must look like "<dir>/<name>.store"
	var reg = regexp.MustCompile(`^(.+)/([\w-]+)(\.store)$`)
	var matches = reg.FindStringSubmatch(this.path)
	if len(matches) != 4 {
		return errors.New("invalid path '" + this.path + "'")
	}
	var dir = matches[1]
	var name = matches[2]
	rawStore, err := kvstore.OpenStoreDir(dir, name)
	if err != nil {
		return err
	}
	this.rawStore = rawStore
	db, err := rawStore.NewDB("cache")
	if err != nil {
		return err
	}
	{
		table, tableErr := kvstore.NewTable[*Item]("items", NewItemKVEncoder[*Item]())
		if tableErr != nil {
			return tableErr
		}
		// indexed fields used by the purge/clean queries
		err = table.AddFields("staleAt", "key", "wildKey", "createdAt")
		if err != nil {
			return err
		}
		db.AddTable(table)
		this.itemsTable = table
	}
	this.rawIsReady = true
	return nil
}
// Path returns the store file path.
func (this *KVListFileStore) Path() string {
	return this.path
}
// AddItem stores an item under its hash, filling in missing CreatedAt/StaleAt.
// Already-expired items and calls before Open are silently dropped.
func (this *KVListFileStore) AddItem(hash string, item *Item) error {
	if !this.isReady() {
		return nil
	}
	var currentTime = fasttime.Now().Unix()
	if item.ExpiresAt <= currentTime {
		return nil
	}
	if item.CreatedAt <= 0 {
		item.CreatedAt = currentTime
	}
	if item.StaleAt <= 0 {
		// stale copies may be served for a grace period past expiry
		item.StaleAt = item.ExpiresAt + DefaultStaleCacheSeconds
	}
	return this.itemsTable.Set(hash, item)
}
// ExistItem reports whether a live item exists and returns its header+body size.
// (false, -1) means absent or not ready; (false, 0) means present but expired.
func (this *KVListFileStore) ExistItem(hash string) (bool, int64, error) {
	if !this.isReady() {
		return false, -1, nil
	}
	item, err := this.itemsTable.Get(hash)
	if err != nil {
		if kvstore.IsNotFound(err) {
			return false, -1, nil
		}
		return false, -1, err
	}
	if item == nil {
		return false, -1, nil
	}
	if item.ExpiresAt <= fasttime.Now().Unix() {
		return false, 0, nil
	}
	// write to cache (capped at one hour to bound staleness)
	this.memCache.Write(hash, item.HeaderSize+item.BodySize, min(item.ExpiresAt, fasttime.Now().Unix()+3600))
	return true, item.HeaderSize + item.BodySize, nil
}
// ExistQuickItem reports key existence only, without checking expiration.
func (this *KVListFileStore) ExistQuickItem(hash string) (bool, error) {
	if !this.isReady() {
		return false, nil
	}
	return this.itemsTable.Exist(hash)
}
// RemoveItem deletes an item by hash; a no-op before Open.
func (this *KVListFileStore) RemoveItem(hash string) error {
	if !this.isReady() {
		return nil
	}
	return this.itemsTable.Delete(hash)
}
// RemoveAllItems truncates the items table; a no-op before Open.
func (this *KVListFileStore) RemoveAllItems() error {
	if !this.isReady() {
		return nil
	}
	return this.itemsTable.Truncate()
}
// PurgeItems scans up to 'count' items in ascending staleAt order, deletes the
// ones whose stale time has passed, and invokes callback for each deleted hash.
// It returns the number of stale items found.
func (this *KVListFileStore) PurgeItems(count int, callback func(hash string) error) (int, error) {
	if !this.isReady() {
		return 0, nil
	}
	var countFound int
	var currentTime = fasttime.Now().Unix()
	var hashList []string
	// the staleAt index is ascending, so scanning stops at the first non-stale item
	err := this.itemsTable.
		Query().
		FieldAsc("staleAt").
		Limit(count).
		FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
			if item.Value == nil {
				return true, nil
			}
			if item.Value.StaleAt < currentTime {
				countFound++
				hashList = append(hashList, item.Key)
				return true, nil
			}
			return false, nil
		})
	if err != nil {
		return 0, err
	}
	// delete items
	if len(hashList) > 0 {
		txErr := this.itemsTable.WriteTx(func(tx *kvstore.Tx[*Item]) error {
			for _, hash := range hashList {
				deleteErr := tx.Delete(hash)
				if deleteErr != nil {
					return deleteErr
				}
				this.memCache.Delete(hash)
			}
			return nil
		})
		if txErr != nil {
			return 0, txErr
		}
		// notify the caller after the transaction committed
		for _, hash := range hashList {
			callbackErr := callback(hash)
			if callbackErr != nil {
				return 0, callbackErr
			}
		}
	}
	return countFound, nil
}
// PurgeLFUItems deletes up to 'count' of the oldest items (ascending createdAt)
// and invokes callback for each deleted hash.
func (this *KVListFileStore) PurgeLFUItems(count int, callback func(hash string) error) error {
	if !this.isReady() {
		return nil
	}
	var hashList []string
	err := this.itemsTable.
		Query().
		FieldAsc("createdAt").
		Limit(count).
		FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
			if item.Value != nil {
				hashList = append(hashList, item.Key)
			}
			return true, nil
		})
	if err != nil {
		return err
	}
	// delete items
	if len(hashList) > 0 {
		txErr := this.itemsTable.WriteTx(func(tx *kvstore.Tx[*Item]) error {
			for _, hash := range hashList {
				deleteErr := tx.Delete(hash)
				if deleteErr != nil {
					return deleteErr
				}
				this.memCache.Delete(hash)
			}
			return nil
		})
		if txErr != nil {
			return txErr
		}
		// notify the caller after the transaction committed
		for _, hash := range hashList {
			callbackErr := callback(hash)
			if callbackErr != nil {
				return callbackErr
			}
		}
	}
	return nil
}
// CleanItemsWithPrefix expires every item whose key starts with 'prefix',
// paging over the key index in batches of 1000. Only items created before the
// call are touched; already-expired items are skipped.
func (this *KVListFileStore) CleanItemsWithPrefix(prefix string) error {
	if !this.isReady() {
		return nil
	}
	if len(prefix) == 0 {
		return nil
	}
	var currentTime = fasttime.Now().Unix()
	var fieldOffset []byte
	const size = 1000
	for {
		var count int
		err := this.itemsTable.
			Query().
			FieldPrefix("key", prefix).
			FieldOffset(fieldOffset).
			Limit(size).
			ForUpdate().
			FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
				if item.Value == nil {
					return true, nil
				}
				count++
				// remember the index position to resume the next page from
				fieldOffset = item.FieldKey
				if item.Value.CreatedAt >= currentTime {
					return true, nil
				}
				if item.Value.ExpiresAt == 0 {
					return true, nil
				}
				// expire the item instead of deleting it, so stale serving still works
				item.Value.ExpiresAt = 0
				item.Value.StaleAt = 0
				setErr := tx.Set(item.Key, item.Value) // TODO improve performance
				if setErr != nil {
					return false, setErr
				}
				// remove from cache
				this.memCache.Delete(item.Key)
				return true, nil
			})
		if err != nil {
			return err
		}
		// a short page means the index has been exhausted
		if count < size {
			break
		}
	}
	return nil
}
// CleanItemsWithWildcardPrefix marks every item whose "wildKey" field
// starts with prefix as expired (ExpiresAt/StaleAt zeroed).
// This is identical to CleanItemsWithPrefix except for the indexed field
// it scans; consider extracting a shared helper parameterized on the field.
func (this *KVListFileStore) CleanItemsWithWildcardPrefix(prefix string) error {
	if !this.isReady() {
		return nil
	}
	if len(prefix) == 0 {
		return nil
	}
	var currentTime = fasttime.Now().Unix()
	// paginate through matches in batches of 'size', resuming at fieldOffset
	var fieldOffset []byte
	const size = 1000
	for {
		var count int
		err := this.itemsTable.
			Query().
			FieldPrefix("wildKey", prefix).
			FieldOffset(fieldOffset).
			Limit(size).
			ForUpdate().
			FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
				if item.Value == nil {
					return true, nil
				}
				count++
				fieldOffset = item.FieldKey
				if item.Value.CreatedAt >= currentTime {
					// written after the scan started: leave untouched
					return true, nil
				}
				if item.Value.ExpiresAt == 0 {
					// already marked expired
					return true, nil
				}
				item.Value.ExpiresAt = 0
				item.Value.StaleAt = 0
				setErr := tx.Set(item.Key, item.Value) // TODO improve performance
				if setErr != nil {
					return false, setErr
				}
				// remove from cache
				this.memCache.Delete(item.Key)
				return true, nil
			})
		if err != nil {
			return err
		}
		if count < size {
			// fewer matches than the batch size: scan complete
			break
		}
	}
	return nil
}
// CleanItemsWithWildcardKey expires every item whose wildcard key matches
// the given key, checking both the exact key and its SuffixAll variant.
// Matching items are marked expired (ExpiresAt/StaleAt zeroed) rather than
// deleted, and items created after the call started are skipped.
func (this *KVListFileStore) CleanItemsWithWildcardKey(key string) error {
	if !this.isReady() {
		return nil
	}
	if len(key) == 0 {
		return nil
	}
	var currentTime = fasttime.Now().Unix()
	for _, realKey := range []string{key, key + SuffixAll} {
		// start scanning at the field-key prefix for this key
		var fieldOffset = append(this.itemsTable.FieldKey("wildKey"), '$')
		fieldOffset = append(fieldOffset, realKey...)
		const size = 1000
		// NOTE(review): exact keys are terminated with two NUL bytes here,
		// presumably mirroring how the wildKey field is encoded when items
		// are written — confirm against the Add path
		var wildKey string
		if !strings.HasSuffix(realKey, SuffixAll) {
			wildKey = string(append([]byte(realKey), 0, 0))
		} else {
			wildKey = realKey
		}
		for {
			var count int
			err := this.itemsTable.
				Query().
				FieldPrefix("wildKey", wildKey).
				FieldOffset(fieldOffset).
				Limit(size).
				ForUpdate().
				FindAll(func(tx *kvstore.Tx[*Item], item kvstore.Item[*Item]) (goNext bool, err error) {
					if item.Value == nil {
						return true, nil
					}
					count++
					fieldOffset = item.FieldKey
					if item.Value.CreatedAt >= currentTime {
						// written after the scan started: leave untouched
						return true, nil
					}
					if item.Value.ExpiresAt == 0 {
						// already marked expired
						return true, nil
					}
					item.Value.ExpiresAt = 0
					item.Value.StaleAt = 0
					setErr := tx.Set(item.Key, item.Value) // TODO improve performance
					if setErr != nil {
						return false, setErr
					}
					// remove from cache
					this.memCache.Delete(item.Key)
					return true, nil
				})
			if err != nil {
				return err
			}
			if count < size {
				break
			}
		}
	}
	return nil
}
// CountItems returns the total number of items in the store,
// or zero when the store is not ready.
func (this *KVListFileStore) CountItems() (int64, error) {
	if this.isReady() {
		return this.itemsTable.Count()
	}
	return 0, nil
}
// StatItems walks every item in the table and aggregates item count,
// total on-disk size and total body size into a Stat.
func (this *KVListFileStore) StatItems() (*Stat, error) {
	var result = &Stat{}
	if !this.isReady() {
		return result, nil
	}
	err := this.itemsTable.
		Query().
		FindAll(func(tx *kvstore.Tx[*Item], row kvstore.Item[*Item]) (goNext bool, err error) {
			var value = row.Value
			if value != nil {
				result.Count++
				result.Size += value.Size()
				result.ValueSize += value.BodySize
			}
			return true, nil
		})
	return result, err
}
// TestInspect dumps every raw key/value pair of the underlying pebble
// store to the test log. It is intended for use from tests only.
func (this *KVListFileStore) TestInspect(t *testing.T) error {
	if !this.isReady() {
		return nil
	}
	it, err := this.rawStore.RawDB().NewIter(&pebble.IterOptions{})
	if err != nil {
		return err
	}
	defer func() {
		_ = it.Close()
	}()
	// iterate from the first key to the end of the keyspace
	for it.First(); it.Valid(); it.Next() {
		valueBytes, valueErr := it.ValueAndErr()
		if valueErr != nil {
			return valueErr
		}
		t.Log(string(it.Key()), "=>", string(valueBytes))
	}
	return nil
}
// Close closes the underlying raw store if one was opened.
func (this *KVListFileStore) Close() error {
	if this.rawStore == nil {
		return nil
	}
	return this.rawStore.Close()
}
// isReady reports whether the store finished initialization and has not
// been closed yet.
func (this *KVListFileStore) isReady() bool {
	return this.rawIsReady && !this.rawStore.IsClosed()
}

View File

@@ -0,0 +1,432 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/rand"
"strconv"
"sync"
"testing"
"time"
)
// testingKVList holds the list opened by the most recent call to
// testOpenKVFileList, so it can be reached across helpers.
var testingKVList *caches.KVFileList
// testOpenKVFileList opens the shared KV file list used by the tests in
// this file, failing the test on error. Callers must Close() it.
func testOpenKVFileList(t *testing.T) *caches.KVFileList {
	var list = caches.NewKVFileList(Tea.Root + "/data/stores/cache-stores")
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	testingKVList = list
	return list
}
func TestNewKVFileList(t *testing.T) {
var list = testOpenKVFileList(t)
err := list.Close()
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Add(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.Add(stringutil.Md5("123456"), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://example.com/index.html",
ExpiresAt: time.Now().Unix() + 60,
StaleAt: 0,
HeaderSize: 0,
BodySize: 4096,
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Add_Many(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
const start = 0
const count = 1_000_000
const concurrent = 100
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fs", costSeconds), "qps:", fmt.Sprintf("%.2fK/s", float64(count)/1000/costSeconds))
}()
var wg = &sync.WaitGroup{}
wg.Add(concurrent)
for c := 0; c < concurrent; c++ {
go func(c int) {
defer wg.Done()
var segmentStart = start + count/concurrent*c
for i := segmentStart; i < segmentStart+count/concurrent; i++ {
err := list.Add(stringutil.Md5(strconv.Itoa(i)), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://www.example.com/index.html" + strconv.Itoa(i),
ExpiresAt: time.Now().Unix() + 3600,
StaleAt: 0,
HeaderSize: 0,
BodySize: int64(rand.Int() % 1_000_000),
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Log(err)
}
}
}(c)
}
wg.Wait()
}
func TestKVFileList_Add_Many_Suffix(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
const start = 0
const count = 1000
const concurrent = 100
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fs", costSeconds), "qps:", fmt.Sprintf("%.2fK/s", float64(count)/1000/costSeconds))
}()
var wg = &sync.WaitGroup{}
wg.Add(concurrent)
for c := 0; c < concurrent; c++ {
go func(c int) {
defer wg.Done()
var segmentStart = start + count/concurrent*c
for i := segmentStart; i < segmentStart+count/concurrent; i++ {
err := list.Add(stringutil.Md5(strconv.Itoa(i)+caches.SuffixAll), &caches.Item{
Type: caches.ItemTypeFile,
Key: "https://www.example.com/index.html" + strconv.Itoa(i) + caches.SuffixAll + "zip",
ExpiresAt: time.Now().Unix() + 60,
StaleAt: 0,
HeaderSize: 0,
BodySize: int64(rand.Int() % 1_000_000),
MetaSize: 0,
Host: "",
ServerId: 1,
Week: 0,
})
if err != nil {
t.Log(err)
}
}
}(c)
}
wg.Wait()
}
func TestKVFileList_Exist(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
b, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "=>", b)
}
}
func TestKVFileList_ExistMany(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var countFound int
var count = 10
if testutils.IsSingleTesting() {
count = 2_000_000
}
var before = time.Now()
for i := 0; i < count; i++ {
ok, _, err := list.Exist(stringutil.Md5(strconv.Itoa(i)))
if err != nil {
t.Fatal(err)
}
if ok {
countFound++
}
}
var costSeconds = time.Since(before).Seconds()
t.Log("total:", costSeconds*1000, "ms", "found:", countFound, "qps:", fmt.Sprintf("%.2fK/s", float64(count)/costSeconds/1000), "per read:", fmt.Sprintf("%.4fms", costSeconds*1000/float64(count)))
}
func TestKVFileList_ExistQuick(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
b, err := list.ExistQuick(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "=>", b)
}
}
func TestKVFileList_Remove(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
for _, hash := range []string{
stringutil.Md5("123456"),
stringutil.Md5("654321"),
} {
err := list.Remove(hash)
if err != nil {
t.Fatal(err)
}
}
}
func TestKVFileList_RemoveMany(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var count = 10
if testutils.IsSingleTesting() {
count = 2_000_000
}
var before = time.Now()
for i := 0; i < count; i++ {
err := list.Remove(stringutil.Md5(strconv.Itoa(i)))
if err != nil {
t.Fatal(err)
}
}
var costSeconds = time.Since(before).Seconds()
t.Log("total:", costSeconds*1000, "ms", "qps:", fmt.Sprintf("%.2fK/s", float64(count)/costSeconds/1000), "per delete:", fmt.Sprintf("%.4fms", costSeconds*1000/float64(count)))
}
func TestKVFileList_CleanAll(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.CleanAll()
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Inspect(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
err := list.TestInspect(t)
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_Purge(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
count, err := list.Purge(4_000, func(hash string) error {
//t.Log("hash:", hash)
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "count:", count)
}
func TestKVFileList_PurgeLFU(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
err := list.PurgeLFU(20000, func(hash string) error {
t.Log("hash:", hash)
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000))
}
func TestKVFileList_Count(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
count, err := list.Count()
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "count:", count)
}
func TestKVFileList_Stat(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
stat, err := list.Stat(func(hash string) bool {
return true
})
if err != nil {
t.Fatal(err)
}
t.Log("cost:", fmt.Sprintf("%.2fms", time.Since(before).Seconds()*1000), "stat:", fmt.Sprintf("%+v", stat))
}
func TestKVFileList_CleanPrefix(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanPrefix("https://www.example.com/index.html")
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_CleanMatchPrefix(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanMatchPrefix("https://*.example.com/index.html")
if err != nil {
t.Fatal(err)
}
}
func TestKVFileList_CleanMatchKey(t *testing.T) {
var list = testOpenKVFileList(t)
defer func() {
_ = list.Close()
}()
var before = time.Now()
defer func() {
var costSeconds = time.Since(before).Seconds()
t.Log("cost:", fmt.Sprintf("%.2fms", costSeconds*1000))
}()
err := list.CleanMatchKey("https://*.example.com/index.html123")
if err != nil {
t.Fatal(err)
}
}
// BenchmarkKVFileList_Exist measures concurrent Exist() lookups against a
// pre-populated store (see TestKVFileList_Add_Many for the data set).
func BenchmarkKVFileList_Exist(b *testing.B) {
	var list = caches.NewKVFileList(Tea.Root + "/data/stores/cache-stores")
	err := list.Init()
	if err != nil {
		b.Fatal(err)
	}
	defer func() {
		_ = list.Close()
	}()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, _, existErr := list.Exist(stringutil.Md5(strconv.Itoa(rand.Int() % 2_000_000)))
			if existErr != nil {
				// b.Fatal (FailNow) must not be called from RunParallel
				// worker goroutines; record the error and stop this worker.
				b.Error(existErr)
				return
			}
		}
	})
}

View File

@@ -0,0 +1,585 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"database/sql"
"errors"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/dbs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/ttlcache"
"github.com/TeaOSLab/EdgeNode/internal/utils/zero"
"github.com/iwind/TeaGo/types"
"os"
"strings"
"sync"
"time"
)
// CountFileDB is the number of SQLite shard databases the file list is
// split across.
const CountFileDB = 20
// SQLiteFileList manages the file cache list, sharding items across
// CountFileDB SQLite databases by item hash.
type SQLiteFileList struct {
	dir    string                         // directory containing the db-N.db shard files
	dbList [CountFileDB]*SQLiteFileListDB // one handle per shard
	onAdd    func(item *Item) // invoked after an item is added
	onRemove func(item *Item) // invoked after an item is removed
	memoryCache *ttlcache.Cache[zero.Zero] // fast existence cache keyed by hash
	// directory of the legacy database to migrate from
	oldDir string
}
// NewSQLiteFileList creates a file cache list backed by SQLite shard
// databases stored under dir.
func NewSQLiteFileList(dir string) ListInterface {
	var list = &SQLiteFileList{
		dir:         dir,
		memoryCache: ttlcache.NewCache[zero.Zero](),
	}
	return list
}
// SetOldDir sets the directory of the legacy database to upgrade from
// during Init.
func (this *SQLiteFileList) SetOldDir(oldDir string) {
	this.oldDir = oldDir
}
// Init opens all shard databases concurrently and schedules a background
// upgrade of any legacy database configured via SetOldDir.
func (this *SQLiteFileList) Init() error {
	// create the cache dir if it does not exist yet
	_, err := os.Stat(this.dir)
	if err != nil {
		err = os.MkdirAll(this.dir, 0777)
		if err != nil {
			return err
		}
		remotelogs.Println("CACHE", "create cache dir '"+this.dir+"'")
	}
	var dir = this.dir
	if dir == "/" {
		// prevent sqlite "authority" errors for the root directory
		dir = ""
	}
	remotelogs.Println("CACHE", "loading database from '"+dir+"' ...")
	var wg = &sync.WaitGroup{}
	var locker = sync.Mutex{}
	var lastErr error
	for i := 0; i < CountFileDB; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			var db = NewSQLiteFileListDB()
			dbErr := db.Open(dir + "/db-" + types.String(i) + ".db")
			if dbErr != nil {
				// guard lastErr with the mutex: several goroutines may fail
				// concurrently, and an unsynchronized write is a data race
				locker.Lock()
				lastErr = dbErr
				locker.Unlock()
				return
			}
			dbErr = db.Init()
			if dbErr != nil {
				locker.Lock()
				lastErr = dbErr
				locker.Unlock()
				return
			}
			locker.Lock()
			this.dbList[i] = db
			locker.Unlock()
		}(i)
	}
	wg.Wait()
	if lastErr != nil {
		return lastErr
	}
	// upgrade the legacy database in the background
	goman.New(func() {
		this.upgradeOldDB()
	})
	return nil
}
// Reset implements ListInterface. The file-backed list intentionally
// keeps its data across resets, so this is a no-op.
func (this *SQLiteFileList) Reset() error {
	// do nothing
	return nil
}
// Add writes an item to its shard database, records its existence in the
// memory cache and fires the onAdd callback.
func (this *SQLiteFileList) Add(hash string, item *Item) error {
	var db = this.GetDB(hash)
	if !db.IsReady() {
		return nil
	}
	err := db.AddSync(hash, item)
	if err != nil {
		return err
	}
	// cache existence, capped at one hour (see maxExpiresAtForMemoryCache)
	this.memoryCache.Write(hash, zero.Zero{}, this.maxExpiresAtForMemoryCache(item.ExpiresAt))
	if this.onAdd != nil {
		this.onAdd(item)
	}
	return nil
}
// Exist reports whether the item for hash exists and is not expired.
// The size return value is always -1 for the SQLite-backed list.
func (this *SQLiteFileList) Exist(hash string) (bool, int64, error) {
	var db = this.GetDB(hash)
	if !db.IsReady() {
		return false, -1, nil
	}
	// if the hash map does not contain it, it definitely does not exist
	if !db.hashMap.Exist(hash) {
		return false, -1, nil
	}
	// a memory-cache hit means a previous lookup confirmed existence
	var item = this.memoryCache.Read(hash)
	if item != nil {
		return true, -1, nil
	}
	var row = db.existsByHashStmt.QueryRow(hash, time.Now().Unix())
	if row.Err() != nil {
		// NOTE(review): the query error is swallowed and reported as
		// "not found" — presumably a deliberate best-effort; confirm
		return false, -1, nil
	}
	var expiredAt int64
	err := row.Scan(&expiredAt)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// no row simply means the item does not exist
			err = nil
		}
		return false, -1, err
	}
	if expiredAt <= fasttime.Now().Unix() {
		return false, -1, nil
	}
	// remember the positive result, capped at one hour
	this.memoryCache.Write(hash, zero.Zero{}, this.maxExpiresAtForMemoryCache(expiredAt))
	return true, -1, nil
}
// ExistQuick reports whether the item's shard is ready and, when it is,
// whether hash is present in the in-memory hash map (no database query).
func (this *SQLiteFileList) ExistQuick(hash string) (isReady bool, found bool) {
	var db = this.GetDB(hash)
	if db.IsReady() && db.HashMapIsLoaded() {
		isReady = true
		found = db.hashMap.Exist(hash)
	}
	return
}
// CleanPrefix removes cached entries whose key starts with prefix,
// delegating to every shard database.
func (this *SQLiteFileList) CleanPrefix(prefix string) error {
	if len(prefix) == 0 {
		return nil
	}
	defer func() {
		// TODO needs optimization: the whole memory cache is dropped
		this.memoryCache.Clean()
	}()
	for _, db := range this.dbList {
		err := db.CleanPrefix(prefix)
		if err != nil {
			return err
		}
	}
	return nil
}
// CleanMatchKey removes cached entries matching a wildcard key,
// such as https://*.example.com/hello
func (this *SQLiteFileList) CleanMatchKey(key string) error {
	if len(key) == 0 {
		return nil
	}
	defer func() {
		// TODO needs optimization: the whole memory cache is dropped
		this.memoryCache.Clean()
	}()
	for _, db := range this.dbList {
		err := db.CleanMatchKey(key)
		if err != nil {
			return err
		}
	}
	return nil
}
// CleanMatchPrefix removes cached entries matching a wildcard prefix,
// such as https://*.example.com/prefix/
func (this *SQLiteFileList) CleanMatchPrefix(prefix string) error {
	if len(prefix) == 0 {
		return nil
	}
	defer func() {
		// TODO needs optimization: the whole memory cache is dropped
		this.memoryCache.Clean()
	}()
	for _, db := range this.dbList {
		err := db.CleanMatchPrefix(prefix)
		if err != nil {
			return err
		}
	}
	return nil
}
// Remove deletes the item for hash from its shard database and caches.
func (this *SQLiteFileList) Remove(hash string) error {
	_, err := this.remove(hash, false)
	return err
}
// Purge removes expired cache entries.
// count is the maximum number of entries to scan per pass, split evenly
// across the shard databases so a single pass stays cheap.
// callback is invoked for every expired hash that was removed.
// It returns the number of expired entries found.
func (this *SQLiteFileList) Purge(count int, callback func(hash string) error) (int, error) {
	count /= CountFileDB
	if count <= 0 {
		count = 100
	}
	var countFound = 0
	for _, db := range this.dbList {
		hashStrings, err := db.ListExpiredItems(count)
		if err != nil {
			// propagate the error (it was previously dropped as "0, nil",
			// silently hiding purge failures)
			return 0, err
		}
		if len(hashStrings) == 0 {
			continue
		}
		countFound += len(hashStrings)
		// not done inside a rows.Next() loop to avoid deadlocks
		for _, hash := range hashStrings {
			_, err = this.remove(hash, true)
			if err != nil {
				return 0, err
			}
			err = callback(hash)
			if err != nil {
				return 0, err
			}
		}
		// hashes are md5 hex strings read back from the database,
		// so joining them into the SQL statement cannot inject
		_, err = db.writeDB.Exec(`DELETE FROM "cacheItems" WHERE "hash" IN ('` + strings.Join(hashStrings, "', '") + `')`)
		if err != nil {
			return 0, err
		}
	}
	return countFound, nil
}
// PurgeLFU removes up to count least-frequently-used entries, split evenly
// across the shard databases, invoking callback for each removed hash.
func (this *SQLiteFileList) PurgeLFU(count int, callback func(hash string) error) error {
	count /= CountFileDB
	if count <= 0 {
		count = 100
	}
	for _, db := range this.dbList {
		hashStrings, err := db.ListLFUItems(count)
		if err != nil {
			return err
		}
		if len(hashStrings) == 0 {
			continue
		}
		// not done inside a rows.Next() loop to avoid deadlocks
		for _, hash := range hashStrings {
			_, err = this.remove(hash, true)
			if err != nil {
				return err
			}
			err = callback(hash)
			if err != nil {
				return err
			}
		}
		// hashes are md5 hex strings read back from the database,
		// so joining them into the SQL statement cannot inject
		_, err = db.writeDB.Exec(`DELETE FROM "cacheItems" WHERE "hash" IN ('` + strings.Join(hashStrings, "', '") + `')`)
		if err != nil {
			return err
		}
	}
	return nil
}
// CleanAll wipes every shard database and then drops the whole memory cache.
func (this *SQLiteFileList) CleanAll() error {
	defer this.memoryCache.Clean()
	for _, db := range this.dbList {
		if err := db.CleanAll(); err != nil {
			return err
		}
	}
	return nil
}
// Stat aggregates count/size statistics across all shard databases.
func (this *SQLiteFileList) Stat(check func(hash string) bool) (*Stat, error) {
	var result = &Stat{}
	for _, db := range this.dbList {
		if !db.IsReady() {
			return &Stat{}, nil
		}
		// no expiration filter is applied and the check function is
		// deliberately ignored here, to keep the query fast
		_ = check
		var row = db.statStmt.QueryRow()
		if row.Err() != nil {
			return nil, row.Err()
		}
		var stat = &Stat{}
		err := row.Scan(&stat.Count, &stat.Size, &stat.ValueSize)
		if err != nil {
			return nil, err
		}
		result.Count += stat.Count
		result.Size += stat.Size
		result.ValueSize += stat.ValueSize
	}
	return result, nil
}
// Count returns the total number of items across all shards.
// This is called frequently, so it sums each shard's cached total instead
// of querying the databases directly.
func (this *SQLiteFileList) Count() (int64, error) {
	var total int64
	for _, db := range this.dbList {
		count, err := db.Total()
		if err != nil {
			return 0, err
		}
		total += count
	}
	return total, nil
}
// IncreaseHit increases the hit counter of the item for hash.
// The update is performed asynchronously by the shard database.
func (this *SQLiteFileList) IncreaseHit(hash string) error {
	var db = this.GetDB(hash)
	if !db.IsReady() {
		return nil
	}
	return db.IncreaseHitAsync(hash)
}
// OnAdd registers the callback invoked after an item is added.
func (this *SQLiteFileList) OnAdd(f func(item *Item)) {
	this.onAdd = f
}
// OnRemove registers the callback invoked after an item is removed.
func (this *SQLiteFileList) OnRemove(f func(item *Item)) {
	this.onRemove = f
}
// Close destroys the memory cache and closes all shard databases in
// parallel, waiting for every close to finish.
func (this *SQLiteFileList) Close() error {
	this.memoryCache.Destroy()
	var group = goman.NewTaskGroup()
	for _, db := range this.dbList {
		var dbCopy = db // capture the loop variable for the closure
		group.Run(func() {
			if dbCopy != nil {
				_ = dbCopy.Close()
			}
		})
	}
	group.Wait()
	return nil
}
// GetDBIndex returns the shard index responsible for the given hash.
func (this *SQLiteFileList) GetDBIndex(hash string) uint64 {
	return fnv.HashString(hash) % CountFileDB
}
// GetDB returns the shard database responsible for the given hash.
func (this *SQLiteFileList) GetDB(hash string) *SQLiteFileListDB {
	// delegate to GetDBIndex so the shard-selection logic lives in one place
	return this.dbList[this.GetDBIndex(hash)]
}
// HashMapIsLoaded reports whether every shard has finished loading its
// in-memory hash map.
func (this *SQLiteFileList) HashMapIsLoaded() bool {
	for _, db := range this.dbList {
		if db.HashMapIsLoaded() {
			continue
		}
		return false
	}
	return true
}
// remove deletes the item for hash from the caches and, unless isDeleted
// is true (meaning the caller already deletes the database row itself),
// from the shard database as well. It reports whether the item was absent.
func (this *SQLiteFileList) remove(hash string, isDeleted bool) (notFound bool, err error) {
	var db = this.GetDB(hash)
	if !db.IsReady() {
		return false, nil
	}
	// absent from the hash map means definitely absent
	if !db.hashMap.Exist(hash) {
		return true, nil
	}
	defer db.hashMap.Delete(hash)
	// drop from the memory cache
	this.memoryCache.Delete(hash)
	if !isDeleted {
		err = db.DeleteSync(hash)
		if err != nil {
			return false, db.WrapError(err)
		}
	}
	if this.onRemove != nil {
		// when remove file item, no any extra information needed
		this.onRemove(nil)
	}
	return false, nil
}
// upgradeOldDB migrates data from the legacy database directory,
// if one was configured via SetOldDir.
func (this *SQLiteFileList) upgradeOldDB() {
	if len(this.oldDir) == 0 {
		return
	}
	_ = this.UpgradeV3(this.oldDir, false)
}
// UpgradeV3 migrates items from a legacy v3 "index.db" database under
// oldDir into the current sharded databases.
// When brokenOnError is false, row-level errors are skipped so a partially
// corrupt legacy database still migrates as much as possible.
// The legacy index.db file is removed when the migration finishes,
// even if it failed part-way.
func (this *SQLiteFileList) UpgradeV3(oldDir string, brokenOnError bool) error {
	// index.db
	var indexDBPath = oldDir + "/index.db"
	_, err := os.Stat(indexDBPath)
	if err != nil {
		// no legacy database: nothing to do
		return nil
	}
	remotelogs.Println("CACHE", "upgrading local database from '"+oldDir+"' ...")
	defer func() {
		_ = fsutils.Remove(indexDBPath)
		remotelogs.Println("CACHE", "upgrading local database finished")
	}()
	db, err := dbs.OpenWriter("file:" + indexDBPath + "?cache=shared&mode=rwc&_journal_mode=WAL&_sync=" + dbs.SyncMode + "&_locking_mode=EXCLUSIVE")
	if err != nil {
		return err
	}
	defer func() {
		_ = db.Close()
	}()
	// page through the legacy table in batches of 'count' rows
	var isFinished = false
	var offset = 0
	var count = 10000
	for {
		if isFinished {
			break
		}
		err = func() error {
			defer func() {
				offset += count
			}()
			rows, err := db.Query(`SELECT "hash", "key", "headerSize", "bodySize", "metaSize", "expiredAt", "staleAt", "createdAt", "host", "serverId" FROM "cacheItems_v3" ORDER BY "id" ASC LIMIT ?, ?`, offset, count)
			if err != nil {
				return err
			}
			defer func() {
				_ = rows.Close()
			}()
			var hash = ""
			var key = ""
			var headerSize int64
			var bodySize int64
			var metaSize int64
			var expiredAt int64
			var staleAt int64
			var createdAt int64
			var host string
			var serverId int64
			// assume done until this batch yields at least one row
			isFinished = true
			for rows.Next() {
				isFinished = false
				err = rows.Scan(&hash, &key, &headerSize, &bodySize, &metaSize, &expiredAt, &staleAt, &createdAt, &host, &serverId)
				if err != nil {
					if brokenOnError {
						return err
					}
					return nil
				}
				// NOTE(review): the legacy createdAt is scanned but not
				// carried over into the new Item — confirm that is intended
				err = this.Add(hash, &Item{
					Type:       ItemTypeFile,
					Key:        key,
					ExpiresAt:  expiredAt,
					StaleAt:    staleAt,
					HeaderSize: headerSize,
					BodySize:   bodySize,
					MetaSize:   metaSize,
					Host:       host,
					ServerId:   serverId,
				})
				if err != nil {
					if brokenOnError {
						return err
					}
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
		// throttle between batches to limit load on the node
		time.Sleep(1 * time.Second)
	}
	return nil
}
// maxExpiresAtForMemoryCache caps an item's expiration timestamp at one
// hour from now, so memory-cache entries never outlive one hour.
func (this *SQLiteFileList) maxExpiresAtForMemoryCache(expiresAt int64) int64 {
	var limit = fasttime.Now().Unix() + 3600
	if expiresAt <= limit {
		return expiresAt
	}
	return limit
}

View File

@@ -0,0 +1,447 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"strconv"
"sync"
"testing"
"time"
)
// TestFileList_Init verifies that a SQLite file list can be initialized.
func TestFileList_Init(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
	// a single deferred Close is enough; the original deferred it twice,
	// closing the shard databases a second time
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")
}
// TestFileList_Add inserts one item and checks that it exists afterwards.
func TestFileList_Add(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
	// a single deferred Close is enough; the original deferred it twice,
	// closing the shard databases a second time
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	var hash = stringutil.Md5("123456")
	t.Log("db index:", list.GetDBIndex(hash))
	err = list.Add(hash, &caches.Item{
		Key:        "123456",
		ExpiresAt:  time.Now().Unix() + 1,
		HeaderSize: 1,
		MetaSize:   2,
		BodySize:   3,
		Host:       "teaos.cn",
		ServerId:   1,
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(list.Exist(hash))
	t.Log("ok")
}
// TestFileList_Add_Many inserts one million items and logs the throughput.
func TestFileList_Add_Many(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	var before = time.Now()
	const offset = 0
	const count = 1_000_000
	for i := offset; i < offset+count; i++ {
		u := "https://edge.teaos.cn/123456" + strconv.Itoa(i)
		// capture the Add error: the original discarded it with `_ =` and
		// then re-checked the stale err from Init, so insertion failures
		// went unnoticed
		err = list.Add(stringutil.Md5(u), &caches.Item{
			Key:        u,
			ExpiresAt:  time.Now().Unix() + 3600,
			HeaderSize: 1,
			MetaSize:   2,
			BodySize:   3,
		})
		if err != nil {
			t.Fatal(err)
		}
		if i > 0 && i%10_000 == 0 {
			t.Log(i, int(10000/time.Since(before).Seconds()), "qps")
			before = time.Now()
		}
	}
	t.Log("ok")
}
func TestFileList_Exist(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
total, _ := list.Count()
t.Log("total:", total)
var before = time.Now()
defer func() {
t.Log(time.Since(before).Seconds()*1000, "ms")
}()
{
var hash = stringutil.Md5("123456")
exists, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "exists:", exists)
}
{
var hash = stringutil.Md5("http://edge.teaos.cn/1234561")
exists, _, err := list.Exist(hash)
if err != nil {
t.Fatal(err)
}
t.Log(hash, "exists:", exists)
}
}
func TestFileList_Exist_Many_DB(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
// 测试在多个数据库下的性能
var listSlice = []caches.ListInterface{}
for i := 1; i <= 10; i++ {
var list = caches.NewSQLiteFileList(Tea.Root + "/data/data" + strconv.Itoa(i))
err := list.Init()
if err != nil {
t.Fatal(err)
}
listSlice = append(listSlice, list)
}
defer func() {
for _, list := range listSlice {
_ = list.Close()
}
}()
var wg = sync.WaitGroup{}
var threads = 8
wg.Add(threads)
var count = 200_000
var countLocker sync.Mutex
var tasks = make(chan int, count)
for i := 0; i < count; i++ {
tasks <- i
}
var hash = stringutil.Md5("http://edge.teaos.cn/1234561")
before := time.Now()
defer func() {
t.Log(time.Since(before).Seconds()*1000, "ms")
}()
for i := 0; i < threads; i++ {
goman.New(func() {
defer wg.Done()
for {
select {
case <-tasks:
countLocker.Lock()
count--
countLocker.Unlock()
var list = listSlice[rands.Int(0, len(listSlice)-1)]
_, _, _ = list.Exist(hash)
default:
return
}
}
})
}
wg.Wait()
t.Log("left:", count)
}
func TestFileList_CleanPrefix(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
before := time.Now()
err = list.CleanPrefix("123")
if err != nil {
t.Fatal(err)
}
t.Log(time.Since(before).Seconds()*1000, "ms")
}
func TestFileList_Remove(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1").(*caches.SQLiteFileList)
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
list.OnRemove(func(item *caches.Item) {
t.Logf("remove %#v", item)
})
err = list.Remove(stringutil.Md5("123456"))
if err != nil {
t.Fatal(err)
}
t.Log("ok")
t.Log("===count===")
t.Log(list.Count())
}
func TestFileList_Purge(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var count = 0
_, err = list.Purge(caches.CountFileDB*2, func(hash string) error {
t.Log(hash)
count++
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("ok, purged", count, "items")
}
func TestFileList_PurgeLFU(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var count = 0
err = list.PurgeLFU(caches.CountFileDB*2, func(hash string) error {
t.Log(hash)
count++
return nil
})
if err != nil {
t.Fatal(err)
}
t.Log("ok, purged", count, "items")
}
func TestFileList_Stat(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
stat, err := list.Stat(nil)
if err != nil {
t.Fatal(err)
}
t.Log("count:", stat.Count, "size:", stat.Size, "valueSize:", stat.ValueSize)
}
func TestFileList_Count(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
var before = time.Now()
count, err := list.Count()
if err != nil {
t.Fatal(err)
}
t.Log("count:", count)
t.Log(time.Since(before).Seconds()*1000, "ms")
}
func TestFileList_CleanAll(t *testing.T) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
t.Fatal(err)
}
err = list.CleanAll()
if err != nil {
t.Fatal(err)
}
t.Log("ok")
t.Log(list.Count())
}
// TestFileList_UpgradeV3 exercises migration from a legacy v3 database.
func TestFileList_UpgradeV3(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p43").(*caches.SQLiteFileList)
	// a single deferred Close is enough; the original deferred it twice,
	// closing the shard databases a second time
	defer func() {
		_ = list.Close()
	}()
	err := list.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = list.UpgradeV3("/Users/WorkSpace/EdgeProject/EdgeCache/p43", false)
	if err != nil {
		t.Log(err)
		return
	}
	t.Log("ok")
}
func BenchmarkFileList_Exist(b *testing.B) {
if !testutils.IsSingleTesting() {
return
}
var list = caches.NewSQLiteFileList(Tea.Root + "/data/cache-index/p1")
defer func() {
_ = list.Close()
}()
err := list.Init()
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _, _ = list.Exist("f0eb5b87e0b0041f3917002c0707475f" + types.String(i))
}
}

View File

@@ -0,0 +1,56 @@
// Copyright 2021 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
// ListInterface is the common interface of cache list implementations
// (memory, SQLite and KV file stores).
type ListInterface interface {
	// Init initializes the list
	Init() error
	// Reset resets the list data
	Reset() error
	// Add adds an item under its hash
	Add(hash string, item *Item) error
	// Exist checks whether an item exists and is not expired
	Exist(hash string) (ok bool, size int64, err error)
	// CleanPrefix removes cached entries with the given key prefix
	CleanPrefix(prefix string) error
	// CleanMatchKey removes entries matching a wildcard key,
	// such as https://*.example.com/hello
	CleanMatchKey(key string) error
	// CleanMatchPrefix removes entries matching a wildcard prefix
	CleanMatchPrefix(prefix string) error
	// Remove removes the item for hash
	Remove(hash string) error
	// Purge removes expired entries
	Purge(count int, callback func(hash string) error) (int, error)
	// PurgeLFU removes least-frequently-used entries
	PurgeLFU(count int, callback func(hash string) error) error
	// CleanAll removes all cached entries
	CleanAll() error
	// Stat collects statistics
	Stat(check func(hash string) bool) (*Stat, error)
	// Count returns the total number of items
	Count() (int64, error)
	// OnAdd registers the add event callback
	OnAdd(f func(item *Item))
	// OnRemove registers the remove event callback
	OnRemove(f func(item *Item))
	// Close closes the list
	Close() error
	// IncreaseHit increases the hit counter of an item
	IncreaseHit(hash string) error
}

View File

@@ -0,0 +1,437 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/configutils"
"github.com/iwind/TeaGo/logs"
"net"
"net/url"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
)
// MemoryList manages the in-memory cache item list, bucketing items by a
// three-character hash prefix to keep the per-map sizes bounded.
type MemoryList struct {
	count    int64                       // total number of items (updated atomically)
	itemMaps map[string]map[string]*Item // prefix => { hash => item }
	prefixes []string                    // all known bucket prefixes
	locker   sync.RWMutex                // guards itemMaps
	onAdd    func(item *Item) // invoked after an item is added
	onRemove func(item *Item) // invoked after an item is removed or replaced
	purgeIndex int // cursor of the next bucket to purge
}
// NewMemoryList creates an empty memory-backed cache list.
func NewMemoryList() ListInterface {
	return &MemoryList{
		itemMaps: map[string]map[string]*Item{},
	}
}
// Init pre-creates the prefix buckets: "000" plus "100" through "999".
// NOTE(review): hashes are hex strings, so prefix() presumably maps
// non-numeric hash prefixes into this numeric range — confirm against
// the prefix() implementation.
func (this *MemoryList) Init() error {
	this.prefixes = []string{"000"}
	for i := 100; i <= 999; i++ {
		this.prefixes = append(this.prefixes, strconv.Itoa(i))
	}
	for _, prefix := range this.prefixes {
		this.itemMaps[prefix] = map[string]*Item{}
	}
	return nil
}
// Reset clears every bucket and resets the item counter to zero.
func (this *MemoryList) Reset() error {
	this.locker.Lock()
	defer this.locker.Unlock()
	for key := range this.itemMaps {
		this.itemMaps[key] = map[string]*Item{}
	}
	atomic.StoreInt64(&this.count, 0)
	return nil
}
// Add inserts (or replaces) an item under its hash, keeping the item
// counter and the add/remove callbacks consistent.
func (this *MemoryList) Add(hash string, item *Item) error {
	this.locker.Lock()
	prefix := this.prefix(hash)
	itemMap, ok := this.itemMaps[prefix]
	if !ok {
		itemMap = map[string]*Item{}
		this.itemMaps[prefix] = itemMap
	}
	// treat a replacement as remove-then-add so statistics stay correct
	oldItem, ok := itemMap[hash]
	if ok {
		// callback
		if this.onRemove != nil {
			this.onRemove(oldItem)
		}
	} else {
		atomic.AddInt64(&this.count, 1)
	}
	// add
	if this.onAdd != nil {
		this.onAdd(item)
	}
	itemMap[hash] = item
	this.locker.Unlock()
	return nil
}
// Exist reports whether a non-expired item with the given hash is present.
// The returned size is always -1 for the memory list.
func (this *MemoryList) Exist(hash string) (bool, int64, error) {
	this.locker.RLock()
	defer this.locker.RUnlock()

	if itemMap, ok := this.itemMaps[this.prefix(hash)]; ok {
		if item, found := itemMap[hash]; found {
			return !item.IsExpired(), -1, nil
		}
	}
	return false, -1, nil
}
// CleanPrefix expires every item whose key starts with the given prefix
// by zeroing its ExpiresAt (actual removal happens later during Purge).
// NOTE(review): items are mutated while holding only the read lock —
// presumably tolerated because it is a single int64 store; confirm.
func (this *MemoryList) CleanPrefix(prefix string) error {
	this.locker.RLock()
	defer this.locker.RUnlock()
	// TODO needs performance optimization: should handle tens of millions of items in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if strings.HasPrefix(item.Key, prefix) {
				item.ExpiresAt = 0
			}
		}
	}
	return nil
}
// CleanMatchKey expires cached entries matching a wildcard key such as
// https://*.example.com/hello: the host part may use domain wildcards and
// the request URI must match exactly or as a suffix-variant of the key.
func (this *MemoryList) CleanMatchKey(key string) error {
	// keys that themselves contain the suffix marker are ignored
	if strings.Contains(key, SuffixAll) {
		return nil
	}

	u, err := url.Parse(key)
	if err != nil {
		// an unparsable key matches nothing; treated as a no-op, not an error
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}

	var requestURI = u.RequestURI()

	this.locker.RLock()
	defer this.locker.RUnlock()
	// TODO needs performance optimization: should handle tens of millions of items in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if configutils.MatchDomain(host, item.Host) {
				var itemRequestURI = item.RequestURI()
				if itemRequestURI == requestURI || strings.HasPrefix(itemRequestURI, requestURI+SuffixAll) {
					// expire; removal happens later during Purge
					item.ExpiresAt = 0
				}
			}
		}
	}
	return nil
}
// CleanMatchPrefix expires cached entries matching a wildcard prefix such as
// https://*.example.com/prefix/: the host part may use domain wildcards and
// item URIs must start with the given path (a root path matches everything).
func (this *MemoryList) CleanMatchPrefix(prefix string) error {
	u, err := url.Parse(prefix)
	if err != nil {
		// an unparsable prefix matches nothing; treated as a no-op, not an error
		return nil
	}
	var host = u.Host
	hostPart, _, err := net.SplitHostPort(host)
	if err == nil && len(hostPart) > 0 {
		host = hostPart
	}
	if len(host) == 0 {
		return nil
	}

	var requestURI = u.RequestURI()
	var isRootPath = requestURI == "/"

	this.locker.RLock()
	defer this.locker.RUnlock()
	// TODO needs performance optimization: should handle tens of millions of items in under 1s
	for _, itemMap := range this.itemMaps {
		for _, item := range itemMap {
			if configutils.MatchDomain(host, item.Host) {
				var itemRequestURI = item.RequestURI()
				if isRootPath || strings.HasPrefix(itemRequestURI, requestURI) {
					// expire; removal happens later during Purge
					item.ExpiresAt = 0
				}
			}
		}
	}
	return nil
}
// Remove deletes the item with the given hash, firing the onRemove callback
// and decrementing the counter when the item was present.
func (this *MemoryList) Remove(hash string) error {
	this.locker.Lock()
	defer this.locker.Unlock()

	itemMap, ok := this.itemMaps[this.prefix(hash)]
	if !ok {
		return nil
	}

	item, found := itemMap[hash]
	if !found {
		return nil
	}

	if this.onRemove != nil {
		this.onRemove(item)
	}
	atomic.AddInt64(&this.count, -1)
	delete(itemMap, hash)
	return nil
}
// Purge scans one shard per call (round-robin via purgeIndex) and removes
// expired items found there.
// count is the maximum number of items VISITED in this call, bounding the
// time spent under the lock; callback is invoked (outside the lock) for each
// removed hash. Returns the number of items actually removed.
func (this *MemoryList) Purge(count int, callback func(hash string) error) (int, error) {
	this.locker.Lock()

	var deletedHashList = []string{}

	// wrap around to the first shard
	if this.purgeIndex >= len(this.prefixes) {
		this.purgeIndex = 0
	}

	var prefix = this.prefixes[this.purgeIndex]

	this.purgeIndex++

	itemMap, ok := this.itemMaps[prefix]
	if !ok {
		this.locker.Unlock()
		return 0, nil
	}

	var countFound = 0
	for hash, item := range itemMap {
		if count <= 0 {
			break
		}

		if item.IsExpired() {
			if this.onRemove != nil {
				this.onRemove(item)
			}
			atomic.AddInt64(&this.count, -1)
			delete(itemMap, hash)
			deletedHashList = append(deletedHashList, hash)
			countFound++
		}

		// decremented for every visited item, expired or not
		count--
	}
	this.locker.Unlock()

	// run the callbacks outside the lock
	for _, hash := range deletedHashList {
		if callback != nil {
			err := callback(hash)
			if err != nil {
				return 0, err
			}
		}
	}

	return countFound, nil
}
// PurgeLFU evicts up to count items, preferring those not used recently
// (item.Week older than the current week). It repeatedly sweeps the shards,
// removing at most one item per shard per round; after three rounds the
// recency restriction is dropped so the requested count can still be met.
// callback is invoked (outside the lock) for each removed hash.
func (this *MemoryList) PurgeLFU(count int, callback func(hash string) error) error {
	if count <= 0 {
		return nil
	}

	var deletedHashList = []string{}
	var week = currentWeek()
	var round = 0
	this.locker.Lock()

Loop:
	for {
		var found = false
		round++
		for _, itemMap := range this.itemMaps {
			for hash, item := range itemMap {
				found = true

				// skip recently-used items, unless we have already looped too many
				// rounds without reaching the requested count
				if week-item.Week <= 1 /** recently used **/ && round <= 3 /** after 3 rounds stop restricting **/ {
					continue
				}

				if this.onRemove != nil {
					this.onRemove(item)
				}
				atomic.AddInt64(&this.count, -1)
				delete(itemMap, hash)
				deletedHashList = append(deletedHashList, hash)
				count--

				if count <= 0 {
					break Loop
				}

				// at most one eviction per shard per round, to spread evictions
				break
			}
		}

		// all shards empty: nothing left to evict
		if !found {
			break
		}
	}
	this.locker.Unlock()

	// run the callbacks outside the lock
	for _, hash := range deletedHashList {
		if callback != nil {
			err := callback(hash)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// CleanAll removes every cached item; it is an alias for Reset.
func (this *MemoryList) CleanAll() error {
	err := this.Reset()
	return err
}
// Stat walks all non-expired items, letting check() validate each hash
// (e.g. verify the backing data still exists), and aggregates the count,
// value size and total size of the items that pass.
func (this *MemoryList) Stat(check func(hash string) bool) (*Stat, error) {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var result = &Stat{}
	for _, itemMap := range this.itemMaps {
		for hash, item := range itemMap {
			if item.IsExpired() {
				continue
			}
			// the caller verifies the underlying data is present and intact
			if check == nil || !check(hash) {
				continue
			}
			result.Count++
			result.ValueSize += item.Size()
			result.Size += item.TotalSize()
		}
	}
	return result, nil
}
// Count returns the current total number of items.
func (this *MemoryList) Count() (int64, error) {
	return atomic.LoadInt64(&this.count), nil
}
// OnAdd registers the callback fired whenever an item is added.
// Not safe to call concurrently with Add; set it during setup.
func (this *MemoryList) OnAdd(f func(item *Item)) {
	this.onAdd = f
}
// OnRemove registers the callback fired whenever an item is removed.
// Not safe to call concurrently with Remove/Purge; set it during setup.
func (this *MemoryList) OnRemove(f func(item *Item)) {
	this.onRemove = f
}
// Close releases resources; the memory list holds none, so it is a no-op.
func (this *MemoryList) Close() error {
	return nil
}
// IncreaseHit marks the item as used in the current week so that PurgeLFU
// treats it as recently accessed. Unknown hashes are ignored.
func (this *MemoryList) IncreaseHit(hash string) error {
	this.locker.Lock()
	defer this.locker.Unlock()

	itemMap, ok := this.itemMaps[this.prefix(hash)]
	if !ok {
		return nil
	}
	if item, found := itemMap[hash]; found {
		item.Week = currentWeek()
	}
	return nil
}
// Prefixes exposes the shard prefixes; used in tests.
func (this *MemoryList) Prefixes() []string {
	return this.prefixes
}
// ItemMaps exposes the raw shard maps; used in tests.
// NOTE(review): the maps are returned without locking — callers must not
// mutate them while the list is in concurrent use.
func (this *MemoryList) ItemMaps() map[string]map[string]*Item {
	return this.itemMaps
}
// PurgeIndex exposes the round-robin purge cursor; used in tests.
func (this *MemoryList) PurgeIndex() int {
	return this.purgeIndex
}
// Print dumps every non-empty shard as JSON; used in tests.
func (this *MemoryList) Print(t *testing.T) {
	this.locker.Lock()
	defer this.locker.Unlock()

	for _, itemMap := range this.itemMaps {
		if len(itemMap) == 0 {
			continue
		}
		logs.PrintAsJSON(itemMap, t)
	}
}
// prefix maps a hash to its shard prefix: the first three characters when
// the hash is long enough and that shard exists, otherwise the fallback
// shard "000".
func (this *MemoryList) prefix(hash string) string {
	var p = hash
	if len(hash) > 3 {
		p = hash[:3]
	}
	if _, ok := this.itemMaps[p]; ok {
		return p
	}
	return "000"
}

View File

@@ -0,0 +1,327 @@
package caches_test
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/cespare/xxhash/v2"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/rands"
"github.com/iwind/TeaGo/types"
stringutil "github.com/iwind/TeaGo/utils/string"
"math/rand"
"sort"
"strconv"
"testing"
"time"
)
// TestMemoryList_Add checks that added items land in the expected shard maps
// and that the total counter is updated.
func TestMemoryList_Add(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	// a hash longer than 3 chars is sharded by its first three characters
	_ = list.Add("123456", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	t.Log(list.Prefixes())
	logs.PrintAsJSON(list.ItemMaps(), t)
	t.Log(list.Count())
}
// TestMemoryList_Remove checks that removing an item drops it from the list
// and decrements the counter.
func TestMemoryList_Remove(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Remove("b")
	list.Print(t)
	t.Log(list.Count())
}
// TestMemoryList_Purge checks that expired items (c, d) are purged while
// fresh ones (a, b) survive, and exercises the round-robin purge cursor.
func TestMemoryList_Purge(t *testing.T) {
	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	// already expired an hour ago
	_ = list.Add("c", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix() - 3600,
		HeaderSize: 1024,
	})
	// expired two seconds ago
	_ = list.Add("d", &caches.Item{
		Key:        "d1",
		ExpiresAt:  time.Now().Unix() - 2,
		HeaderSize: 1024,
	})
	_, _ = list.Purge(100, func(hash string) error {
		t.Log("delete:", hash)
		return nil
	})
	list.Print(t)

	// each Purge call visits one shard; watch the cursor advance and wrap
	for i := 0; i < 1000; i++ {
		_, _ = list.Purge(100, func(hash string) error {
			t.Log("delete:", hash)
			return nil
		})
		t.Log(list.PurgeIndex())
	}

	t.Log(list.Count())
}
// TestMemoryList_Purge_Large_List fills the list with many items (one million
// in single-testing mode) for manual observation of memory/purge behavior.
func TestMemoryList_Purge_Large_List(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()

	var count = 100
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for i := 0; i < count; i++ {
		_ = list.Add("a"+strconv.Itoa(i), &caches.Item{
			Key:        "a" + strconv.Itoa(i),
			ExpiresAt:  time.Now().Unix() + int64(rands.Int(0, 24*3600)),
			HeaderSize: 1024,
		})
	}

	// keep the process alive for external inspection (profiler, top, etc.)
	if testutils.IsSingleTesting() {
		time.Sleep(1 * time.Hour)
	}
}
// TestMemoryList_Stat exercises Stat with a randomized check function;
// expired items (c, d) must never be counted regardless of the check.
func TestMemoryList_Stat(t *testing.T) {
	list := caches.NewMemoryList()
	_ = list.Init()
	_ = list.Add("a", &caches.Item{
		Key:        "a1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	_ = list.Add("b", &caches.Item{
		Key:        "b1",
		ExpiresAt:  time.Now().Unix() + 3600,
		HeaderSize: 1024,
	})
	// expires right now — treated as expired
	_ = list.Add("c", &caches.Item{
		Key:        "c1",
		ExpiresAt:  time.Now().Unix(),
		HeaderSize: 1024,
	})
	_ = list.Add("d", &caches.Item{
		Key:        "d1",
		ExpiresAt:  time.Now().Unix() - 2,
		HeaderSize: 1024,
	})
	result, _ := list.Stat(func(hash string) bool {
		// randomized check: roughly half the live items pass
		return rand.Int()%2 == 0
	})
	t.Log(result)
}
// TestMemoryList_CleanPrefix measures CleanPrefix over a large key set and
// verifies via Stat that the matched keys were expired.
func TestMemoryList_CleanPrefix(t *testing.T) {
	list := caches.NewMemoryList()
	_ = list.Init()
	before := time.Now()

	var count = 100
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for i := 0; i < count; i++ {
		key := "https://www.teaos.cn/hello/" + strconv.Itoa(i/10000) + "/" + strconv.Itoa(i) + ".html"
		// hashes are xxhash of the key, matching production usage
		_ = list.Add(fmt.Sprintf("%d", xxhash.Sum64String(key)), &caches.Item{
			Key:        key,
			ExpiresAt:  time.Now().Unix() + 3600,
			BodySize:   0,
			HeaderSize: 0,
		})
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")

	before = time.Now()
	err := list.CleanPrefix("https://www.teaos.cn/hello/10")
	if err != nil {
		t.Fatal(err)
	}

	logs.Println(list.Stat(func(hash string) bool {
		return true
	}))
	t.Log(time.Since(before).Seconds()*1000, "ms")
}
// TestMapRandomDelete probes how evenly Go's random map iteration order
// spreads deletions: repeatedly delete the first 10 of 100 keys and count
// how often each key survives.
func TestMapRandomDelete(t *testing.T) {
	var countMap = map[int]int{} // k => count

	var count = 1000
	if testutils.IsSingleTesting() {
		count = 1_000_000
	}

	for j := 0; j < count; j++ {
		var m = map[int]bool{}
		for i := 0; i < 100; i++ {
			m[i] = true
		}

		// delete the first 10 keys in (random) iteration order
		var count = 0
		for k := range m {
			delete(m, k)
			count++
			if count >= 10 {
				break
			}
		}

		for k := range m {
			countMap[k]++
		}
	}

	var counts = []int{}
	for _, count := range countMap {
		counts = append(counts, count)
	}
	sort.Ints(counts)
	t.Log("["+types.String(len(counts))+"]", counts)
}
// TestMemoryList_PurgeLFU checks that PurgeLFU evicts the requested number
// of items and that the counter reflects the evictions.
// NOTE(review): unlike the other tests, Init() is not called here — Add()
// creates shards lazily, so this exercises the uninitialized-list path;
// confirm that is intentional.
func TestMemoryList_PurgeLFU(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)

	var before = time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()

	_ = list.Add("1", &caches.Item{})
	_ = list.Add("2", &caches.Item{})
	_ = list.Add("3", &caches.Item{})
	_ = list.Add("4", &caches.Item{})
	_ = list.Add("5", &caches.Item{})

	//_ = list.IncreaseHit("1")
	//_ = list.IncreaseHit("2")
	//_ = list.IncreaseHit("3")
	//_ = list.IncreaseHit("4")
	//_ = list.IncreaseHit("5")

	count, err := list.Count()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("count items before purge:", count)

	err = list.PurgeLFU(5, func(hash string) error {
		t.Log("purge lfu:", hash)
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")

	count, err = list.Count()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("count items left:", count)
}
// TestMemoryList_CleanAll checks that CleanAll empties the list and resets
// the counter (Init is intentionally skipped; Add creates shards lazily).
func TestMemoryList_CleanAll(t *testing.T) {
	var list = caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Add("a", &caches.Item{})
	_ = list.CleanAll()
	logs.PrintAsJSON(list.ItemMaps(), t)
	t.Log(list.Count())
}
// TestMemoryList_GC is a manual memory test: fill a million items, clean them
// all, then idle so GC/RSS behavior can be observed externally. Only runs in
// single-testing mode.
func TestMemoryList_GC(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}

	list := caches.NewMemoryList().(*caches.MemoryList)
	_ = list.Init()
	for i := 0; i < 1_000_000; i++ {
		key := "https://www.teaos.cn/hello" + strconv.Itoa(i/100000) + "/" + strconv.Itoa(i) + ".html"
		_ = list.Add(fmt.Sprintf("%d", xxhash.Sum64String(key)), &caches.Item{
			Key:        key,
			ExpiresAt:  0,
			BodySize:   0,
			HeaderSize: 0,
		})
	}
	t.Log("clean...", len(list.ItemMaps()))
	_ = list.CleanAll()
	t.Log("cleanAll...", len(list.ItemMaps()))
	before := time.Now()
	//runtime.GC()
	t.Log("gc cost:", time.Since(before).Seconds()*1000, "ms")

	// idle so memory can be inspected with external tools
	if testutils.IsSingleTesting() {
		timeout := time.NewTimer(2 * time.Minute)
		<-timeout.C
		t.Log("2 minutes passed")
		time.Sleep(30 * time.Minute)
	}
}
// BenchmarkMemoryList measures mixed concurrent Exist/Add/Purge throughput
// against a list pre-filled with one million items.
func BenchmarkMemoryList(b *testing.B) {
	var list = caches.NewMemoryList()
	err := list.Init()
	if err != nil {
		b.Fatal(err)
	}

	for i := 0; i < 1_000_000; i++ {
		_ = list.Add(stringutil.Md5(types.String(i)), &caches.Item{
			Key:        "a1",
			ExpiresAt:  time.Now().Unix() + 3600,
			HeaderSize: 1024,
		})
	}

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, _, _ = list.Exist(types.String("a" + types.String(rands.Int(1, 10000))))
			_ = list.Add("a"+types.String(rands.Int(1, 100000)), &caches.Item{})
			_, _ = list.Purge(1000, func(hash string) error {
				return nil
			})
		}
	})
}

View File

@@ -0,0 +1,267 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/events"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/iwind/TeaGo/lists"
"github.com/iwind/TeaGo/types"
"strconv"
"sync"
)
// SharedManager is the process-wide cache policy manager singleton.
var SharedManager = NewManager()

func init() {
	if !teaconst.IsMain {
		return
	}

	// On shutdown, apply an empty policy set so every storage is stopped cleanly.
	events.OnClose(func() {
		remotelogs.Println("CACHE", "quiting cache manager")
		SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{})
	})
}
// Manager owns the set of HTTP cache policies and their storage instances.
type Manager struct {
	// global configuration
	MaxDiskCapacity   *shared.SizeCapacity      // overall disk cap for file storages
	MainDiskDir       string                    // node's primary cache directory
	SubDiskDirs       []*serverconfigs.CacheDir // additional cache directories
	MaxMemoryCapacity *shared.SizeCapacity      // overall memory cap

	CountFileStorages   int // number of file-backed storages (recomputed by UpdatePolicies)
	CountMemoryStorages int // number of storages with a memory layer (recomputed by UpdatePolicies)

	policyMap  map[int64]*serverconfigs.HTTPCachePolicy // policyId => []*Policy
	storageMap map[int64]StorageInterface               // policyId => *Storage
	locker     sync.RWMutex                             // guards policyMap and storageMap
}
// NewManager creates an empty cache policy manager.
func NewManager() *Manager {
	return &Manager{
		policyMap:  map[int64]*serverconfigs.HTTPCachePolicy{},
		storageMap: map[int64]StorageInterface{},
	}
}
// UpdatePolicies replaces the full policy set: storages for removed policies
// are stopped, new policies are initialized and get storages, and changed
// policies are either updated in place (when the storage supports it) or
// restarted. Finally the storage counters are recomputed.
func (this *Manager) UpdatePolicies(newPolicies []*serverconfigs.HTTPCachePolicy) {
	this.locker.Lock()
	defer this.locker.Unlock()

	var newPolicyIds = []int64{}
	for _, policy := range newPolicies {
		// use this node's own cache directories
		policy.UpdateDiskDir(this.MainDiskDir, this.SubDiskDirs)

		newPolicyIds = append(newPolicyIds, policy.Id)
	}

	// stop policies that are no longer configured
	for _, oldPolicy := range this.policyMap {
		if !lists.ContainsInt64(newPolicyIds, oldPolicy.Id) {
			remotelogs.Println("CACHE", "remove policy "+strconv.FormatInt(oldPolicy.Id, 10))
			delete(this.policyMap, oldPolicy.Id)
			storage, ok := this.storageMap[oldPolicy.Id]
			if ok {
				storage.Stop()
				delete(this.storageMap, oldPolicy.Id)
			}
		}
	}

	// register the new policy set
	for _, newPolicy := range newPolicies {
		_, ok := this.policyMap[newPolicy.Id]
		if !ok {
			remotelogs.Println("CACHE", "add policy "+strconv.FormatInt(newPolicy.Id, 10))
		}

		// initialize; a failing policy is logged and skipped, not fatal
		err := newPolicy.Init()
		if err != nil {
			remotelogs.Error("CACHE", "UpdatePolicies: init policy error: "+err.Error())
			continue
		}

		this.policyMap[newPolicy.Id] = newPolicy
	}

	// start or update storages
	for _, policy := range this.policyMap {
		storage, ok := this.storageMap[policy.Id]
		if !ok {
			// no storage yet: create and start one
			storage = this.NewStorageWithPolicy(policy)
			if storage == nil {
				remotelogs.Error("CACHE", "can not find storage type '"+policy.Type+"'")
				continue
			}
			err := storage.Init()
			if err != nil {
				remotelogs.Error("CACHE", "UpdatePolicies: init storage failed: "+err.Error())
				continue
			}
			this.storageMap[policy.Id] = storage
		} else {
			// check whether the policy changed
			if !storage.Policy().IsSame(policy) {
				// check whether the storage can apply the change in place
				if storage.CanUpdatePolicy(policy) {
					err := policy.Init()
					if err != nil {
						remotelogs.Error("CACHE", "reload policy '"+types.String(policy.Id)+"' failed: init policy failed: "+err.Error())
						continue
					}
					remotelogs.Println("CACHE", "reload policy '"+types.String(policy.Id)+"'")
					storage.UpdatePolicy(policy)
					continue
				}

				remotelogs.Println("CACHE", "restart policy '"+types.String(policy.Id)+"'")

				// stop the old storage
				storage.Stop()
				delete(this.storageMap, policy.Id)

				// start a fresh one
				storage = this.NewStorageWithPolicy(policy)
				if storage == nil {
					remotelogs.Error("CACHE", "can not find storage type '"+policy.Type+"'")
					continue
				}
				err := storage.Init()
				if err != nil {
					remotelogs.Error("CACHE", "UpdatePolicies: init storage failed: "+err.Error())
					continue
				}
				this.storageMap[policy.Id] = storage
			}
		}
	}

	// recompute the storage counters
	// NOTE(review): every storage increments CountMemoryStorages — presumably
	// because file storages also embed a memory layer; confirm this is intended.
	this.CountFileStorages = 0
	this.CountMemoryStorages = 0
	for _, storage := range this.storageMap {
		_, isFileStorage := storage.(*FileStorage)
		this.CountMemoryStorages++
		if isFileStorage {
			this.CountFileStorages++
		}
	}
}
// FindPolicy returns the policy with the given id, or nil when unknown.
func (this *Manager) FindPolicy(policyId int64) *serverconfigs.HTTPCachePolicy {
	this.locker.RLock()
	var policy = this.policyMap[policyId]
	this.locker.RUnlock()
	return policy
}
// FindStorageWithPolicy returns the storage for the given policy id,
// or nil when no storage is registered for it.
func (this *Manager) FindStorageWithPolicy(policyId int64) StorageInterface {
	this.locker.RLock()
	var storage = this.storageMap[policyId]
	this.locker.RUnlock()
	return storage
}
// NewStorageWithPolicy instantiates a storage implementation matching the
// policy's type; it returns nil for unknown types.
func (this *Manager) NewStorageWithPolicy(policy *serverconfigs.HTTPCachePolicy) StorageInterface {
	if policy.Type == serverconfigs.CachePolicyStorageFile {
		return NewFileStorage(policy)
	}
	if policy.Type == serverconfigs.CachePolicyStorageMemory {
		return NewMemoryStorage(policy, nil)
	}
	return nil
}
// StorageMap exposes the live policyId => storage map.
// NOTE(review): returned without locking — callers must treat it as
// read-only and tolerate concurrent modification; confirm usage.
func (this *Manager) StorageMap() map[int64]StorageInterface {
	return this.storageMap
}
// TotalMemorySize sums the memory consumed by all storages.
func (this *Manager) TotalMemorySize() int64 {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var total int64
	for _, storage := range this.storageMap {
		total += storage.TotalMemorySize()
	}
	return total
}
// FindAllCachePaths returns the "dir" option of every file-type cache
// policy; policies without a non-empty dir are skipped.
func (this *Manager) FindAllCachePaths() []string {
	// this method only reads policyMap, so a read lock is sufficient
	// (the original took the exclusive write lock)
	this.locker.RLock()
	defer this.locker.RUnlock()

	var result = []string{}
	for _, policy := range this.policyMap {
		if policy.Type != serverconfigs.CachePolicyStorageFile || policy.Options == nil {
			continue
		}
		dir, ok := policy.Options["dir"]
		if !ok {
			continue
		}
		var dirString = types.String(dir)
		if len(dirString) > 0 {
			result = append(result, dirString)
		}
	}
	return result
}
// FindAllStorages returns a snapshot slice of all registered storages.
func (this *Manager) FindAllStorages() []StorageInterface {
	// this method only reads storageMap, so a read lock is sufficient
	// (the original took the exclusive write lock)
	this.locker.RLock()
	defer this.locker.RUnlock()

	var storages = []StorageInterface{}
	for _, storage := range this.storageMap {
		storages = append(storages, storage)
	}
	return storages
}
// ScanGarbageCaches walks every file-backed storage looking for orphaned
// cache files, invoking callback for each path found. The first error aborts
// the scan.
func (this *Manager) ScanGarbageCaches(callback func(path string) error) error {
	for _, storage := range this.FindAllStorages() {
		fileStorage, isFile := storage.(*FileStorage)
		if !isFile {
			continue
		}
		if err := fileStorage.ScanGarbageCaches(callback); err != nil {
			return err
		}
	}
	return nil
}
// MaxSystemMemoryBytesPerStorage returns the memory budget for one storage:
// a third of system memory split evenly among the memory storages, but never
// less than 1 GiB.
func (this *Manager) MaxSystemMemoryBytesPerStorage() int64 {
	var storages = this.CountMemoryStorages
	if storages < 1 {
		storages = 1
	}

	var perStorage = int64(memutils.SystemMemoryBytes()) / 3 / int64(storages) // 1/3 of the system memory
	var minBytes = int64(1 << 30)
	if perStorage < minBytes {
		return minBytes
	}
	return perStorage
}

View File

@@ -0,0 +1,133 @@
package caches_test
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs/shared"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/Tea"
"testing"
)
// TestManager_UpdatePolicies applies successive policy sets (empty, 1/2/3,
// then 1/2/4) and prints the manager state after each step, exercising
// add/keep/remove transitions.
func TestManager_UpdatePolicies(t *testing.T) {
	{
		var policies = []*serverconfigs.HTTPCachePolicy{}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}

	{
		var policies = []*serverconfigs.HTTPCachePolicy{
			{
				Id:   1,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   2,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   3,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
		}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}

	// policy 3 removed, policy 4 added; 1 and 2 must survive unchanged
	{
		var policies = []*serverconfigs.HTTPCachePolicy{
			{
				Id:   1,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   2,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
			{
				Id:   4,
				Type: serverconfigs.CachePolicyStorageFile,
				Options: map[string]interface{}{
					"dir": Tea.Root + "/caches",
				},
			},
		}
		caches.SharedManager.UpdatePolicies(policies)
		printManager(t)
	}
}
// TestManager_ChangePolicy_Memory updates a memory policy's capacity in place
// (1GB -> 2GB), exercising the CanUpdatePolicy/UpdatePolicy path.
func TestManager_ChangePolicy_Memory(t *testing.T) {
	var policies = []*serverconfigs.HTTPCachePolicy{
		{
			Id:       1,
			Type:     serverconfigs.CachePolicyStorageMemory,
			Options:  map[string]interface{}{},
			Capacity: &shared.SizeCapacity{Count: 1, Unit: shared.SizeCapacityUnitGB},
		},
	}
	caches.SharedManager.UpdatePolicies(policies)

	caches.SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{
		{
			Id:       1,
			Type:     serverconfigs.CachePolicyStorageMemory,
			Options:  map[string]interface{}{},
			Capacity: &shared.SizeCapacity{Count: 2, Unit: shared.SizeCapacityUnitGB},
		},
	})
}
// TestManager_ChangePolicy_File updates a file policy's capacity (1GB -> 2GB)
// with the same dir, exercising the in-place policy update path for files.
func TestManager_ChangePolicy_File(t *testing.T) {
	var policies = []*serverconfigs.HTTPCachePolicy{
		{
			Id:   1,
			Type: serverconfigs.CachePolicyStorageFile,
			Options: map[string]interface{}{
				"dir": Tea.Root + "/data/cache-index/p1",
			},
			Capacity: &shared.SizeCapacity{Count: 1, Unit: shared.SizeCapacityUnitGB},
		},
	}
	caches.SharedManager.UpdatePolicies(policies)

	caches.SharedManager.UpdatePolicies([]*serverconfigs.HTTPCachePolicy{
		{
			Id:   1,
			Type: serverconfigs.CachePolicyStorageFile,
			Options: map[string]interface{}{
				"dir": Tea.Root + "/data/cache-index/p1",
			},
			Capacity: &shared.SizeCapacity{Count: 2, Unit: shared.SizeCapacityUnitGB},
		},
	})
}
// TestManager_MaxSystemMemoryBytesPerStorage prints the per-storage memory
// budget for 0..99 memory storages (the 1 GiB floor should show for large i).
func TestManager_MaxSystemMemoryBytesPerStorage(t *testing.T) {
	for i := 0; i < 100; i++ {
		caches.SharedManager.CountMemoryStorages = i
		t.Log(i, caches.SharedManager.MaxSystemMemoryBytesPerStorage()>>30, "GB")
	}
}
// printManager logs the policy id of every storage currently registered
// with the shared manager.
func printManager(t *testing.T) {
	t.Log("===manager==")
	t.Log("storage:")
	for _, s := range caches.SharedManager.StorageMap() {
		t.Log(" storage:", s.Policy().Id)
	}
	t.Log("===============")
}

View File

@@ -0,0 +1,48 @@
// Copyright 2023 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !windows
package caches
import (
"fmt"
"golang.org/x/sys/unix"
)
// TotalDiskSize reports the used bytes of every distinct disk partition that
// hosts a file-backed cache directory (non-Windows build).
func (this *Manager) TotalDiskSize() int64 {
	this.locker.RLock()
	defer this.locker.RUnlock()

	var total = int64(0)
	var sidMap = map[string]bool{} // partition sid => bool
	for _, storage := range this.storageMap {
		// we must not simply sum storage.TotalDiskSize(): several cache policies
		// may point at directories on the same partition, so we deduplicate by
		// the partition's filesystem id instead
		fileStorage, ok := storage.(*FileStorage)
		if ok {
			var options = fileStorage.options // copy
			if options != nil {
				var dir = options.Dir // copy
				if len(dir) == 0 {
					continue
				}
				var stat = &unix.Statfs_t{}
				err := unix.Statfs(dir, stat)
				if err != nil {
					// an unreadable mount is skipped rather than failing the total
					continue
				}
				var sid = fmt.Sprintf("%d_%d", stat.Fsid.Val[0], stat.Fsid.Val[1])
				if sidMap[sid] {
					// partition already counted via another policy
					continue
				}
				sidMap[sid] = true
				total += int64(stat.Blocks-stat.Bfree) * int64(stat.Bsize) // we add extra int64() for darwin
			}
		}
	}

	if total < 0 {
		total = 0
	}

	return total
}

View File

@@ -0,0 +1,36 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"io"
"os"
)
// OpenFile wraps an opened cache file handle together with its parsed
// metadata so the handle can be pooled and reused across requests.
type OpenFile struct {
	fp      *os.File // the underlying open file handle
	meta    []byte   // raw metadata bytes
	header  []byte   // raw header bytes
	version int64    // version stamp used by OpenFilePool to reject stale files
	size    int64    // file size in bytes; used for cache capacity accounting
}
// NewOpenFile wraps an opened cache file together with its meta bytes,
// header bytes, version stamp and size.
func NewOpenFile(fp *os.File, meta []byte, header []byte, version int64, size int64) *OpenFile {
	var file = &OpenFile{
		fp:      fp,
		meta:    meta,
		header:  header,
		version: version,
		size:    size,
	}
	return file
}
// SeekStart rewinds the underlying file to offset 0 so a pooled handle
// can be read again from the beginning.
func (this *OpenFile) SeekStart() error {
	_, err := this.fp.Seek(0, io.SeekStart)
	return err
}
// Close releases the meta buffer and closes the underlying file handle.
func (this *OpenFile) Close() error {
	this.meta = nil
	return this.fp.Close()
}

View File

@@ -0,0 +1,210 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"fmt"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/fsnotify/fsnotify"
"github.com/iwind/TeaGo/logs"
"github.com/iwind/TeaGo/types"
"path/filepath"
"runtime"
"sync"
"time"
)
const (
	// maxOpenFileSize is the largest file (256 MiB) kept in the open-file
	// cache; bigger files are not worth pinning a handle for.
	maxOpenFileSize = 256 << 20
)
// OpenFileCache keeps recently used cache files open, pooled per path,
// and invalidates pools when fsnotify reports the file changed.
type OpenFileCache struct {
	poolMap  map[string]*OpenFilePool     // file path => Pool
	poolList *linkedlist.List[*OpenFilePool] // LRU-ish order; head is evicted first
	watcher  *fsnotify.Watcher            // invalidates pools on file events

	locker       sync.RWMutex // guards all fields below and the maps/list above
	maxCount     int          // maximum number of pooled open files
	capacitySize int64        // maximum total bytes of pooled files
	count        int          // current number of pooled open files
	usedSize     int64        // current total bytes of pooled files
}
// NewOpenFileCache creates a cache holding at most maxCount open files
// (default 16384) and at most 1/16 of system memory in bytes. It starts a
// goroutine that closes a file's pool whenever fsnotify reports an event
// on it, so stale handles are never served.
func NewOpenFileCache(maxCount int) (*OpenFileCache, error) {
	if maxCount <= 0 {
		maxCount = 16384
	}

	var cache = &OpenFileCache{
		maxCount:     maxCount,
		poolMap:      map[string]*OpenFilePool{},
		poolList:     linkedlist.NewList[*OpenFilePool](),
		capacitySize: (int64(memutils.SystemMemoryGB()) << 30) / 16,
	}

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	cache.watcher = watcher

	goman.New(func() {
		for event := range watcher.Events {
			// on non-Linux systems pure Chmod events are ignored to avoid
			// needless invalidation; any other event closes the pool
			if runtime.GOOS == "linux" || event.Op&fsnotify.Chmod != fsnotify.Chmod {
				cache.Close(event.Name)
			}
		}
	})

	return cache, nil
}
// Get returns a pooled open file for the path, or nil when none is cached.
// When a file is taken, the cache's count/size accounting is adjusted.
func (this *OpenFileCache) Get(filename string) *OpenFile {
	filename = filepath.Clean(filename)

	this.locker.RLock()
	pool, ok := this.poolMap[filename]
	this.locker.RUnlock()
	if ok {
		file, consumed, consumedSize := pool.Get()
		if consumed {
			this.locker.Lock()
			this.count--
			this.usedSize -= consumedSize

			// an empty pool is intentionally kept in the list so a following
			// Put does not have to recreate it
			this.locker.Unlock()
		}

		return file
	}
	return nil
}
// Put offers an open file back to the cache. Oversized files are rejected;
// when the cache is full the head (oldest) pool is trimmed instead of
// accepting the file.
// NOTE(review): on both rejection paths the file is returned without being
// closed — presumably the caller retains ownership then; confirm.
func (this *OpenFileCache) Put(filename string, file *OpenFile) {
	filename = filepath.Clean(filename)

	// files above the size threshold are never pooled
	if file.size > maxOpenFileSize {
		return
	}

	this.locker.Lock()
	defer this.locker.Unlock()

	// over capacity: evict from the head instead of storing the new file
	if this.count >= this.maxCount || this.usedSize+file.size >= this.capacitySize {
		this.consumeHead()
		return
	}

	pool, ok := this.poolMap[filename]
	var success bool
	if ok {
		success = pool.Put(file)
	} else {
		// first file for this path: start watching it and create its pool
		_ = this.watcher.Add(filename)

		pool = NewOpenFilePool(filename)
		pool.version = file.version
		this.poolMap[filename] = pool
		success = pool.Put(file)
	}
	// move (or insert) the pool at the tail of the eviction list
	this.poolList.Push(pool.linkItem)

	// account for the stored file
	if success {
		this.count++
		this.usedSize += file.size
	}
}
// Close removes the pool for one path, unwatches the file and closes all
// pooled handles. The actual closing happens outside the lock for speed.
func (this *OpenFileCache) Close(filename string) {
	filename = filepath.Clean(filename)

	this.locker.Lock()

	pool, ok := this.poolMap[filename]
	if ok {
		// mark as closing so concurrent Get/Put back off
		pool.SetClosing()

		delete(this.poolMap, filename)
		this.poolList.Remove(pool.linkItem)
		_ = this.watcher.Remove(filename)

		this.count -= pool.Len()
		this.usedSize -= pool.usedSize
	}

	this.locker.Unlock()

	// close handles outside the lock to keep the critical section short
	if ok {
		pool.Close()
	}
}
// CloseAll closes every pooled handle, empties the cache and shuts down the
// fsnotify watcher (the cache must not be reused afterwards, since the
// watcher is closed).
func (this *OpenFileCache) CloseAll() {
	this.locker.Lock()
	for _, pool := range this.poolMap {
		pool.Close()
	}
	this.poolMap = map[string]*OpenFilePool{}
	this.poolList.Reset()
	_ = this.watcher.Close()
	this.count = 0
	this.usedSize = 0
	this.locker.Unlock()
}
// SetCapacity overrides the total byte capacity of the cache.
// NOTE(review): written without holding locker — presumably only called
// during setup before concurrent use; confirm.
func (this *OpenFileCache) SetCapacity(capacityBytes int64) {
	this.capacitySize = capacityBytes
}
// Debug starts a goroutine that logs the cache's count, used size and
// per-pool lengths every 5 seconds.
// NOTE(review): the ticker is never stopped, so the goroutine runs for the
// process lifetime — acceptable for a debug helper, but confirm.
func (this *OpenFileCache) Debug() {
	var ticker = time.NewTicker(5 * time.Second)
	goman.New(func() {
		for range ticker.C {
			logs.Println("==== " + types.String(this.count) + ", " + fmt.Sprintf("%.4fMB", float64(this.usedSize)/(1<<20)) + " ====")
			this.poolList.Range(func(item *linkedlist.Item[*OpenFilePool]) (goNext bool) {
				logs.Println(filepath.Base(item.Value.Filename()), item.Value.Len())
				return true
			})
		}
	})
}
// consumeHead evicts one file (two when the cache holds more than 100) from
// the pool at the head of the eviction list, closing the evicted handles and
// removing pools that become empty. Caller must hold the write lock.
func (this *OpenFileCache) consumeHead() {
	// evict faster when the cache is large
	var delta = 1
	if this.count > 100 {
		delta = 2
	}

	for i := 0; i < delta; i++ {
		var head = this.poolList.Head()
		if head == nil {
			break
		}

		var headPool = head.Value

		headFile, consumed, consumedSize := headPool.Get()
		if consumed {
			this.count--
			this.usedSize -= consumedSize

			if headFile != nil {
				_ = headFile.Close()
			}
		}

		// drop the pool entirely once it holds no more files
		if headPool.Len() == 0 {
			delete(this.poolMap, headPool.filename)
			this.poolList.Remove(head)
			_ = this.watcher.Remove(headPool.filename)
		}
	}
}

View File

@@ -0,0 +1,69 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/types"
"testing"
"time"
)
// TestNewOpenFileCache_Close exercises Put/Get/Close on a small cache;
// fake OpenFiles (nil fp) of 1 MiB each are used.
func TestNewOpenFileCache_Close(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.Debug()
	cache.Put("a.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Put("c.txt", caches.NewOpenFile(nil, nil, nil, 0, 1<<20))
	cache.Get("b.txt")
	cache.Get("d.txt") // not exist
	cache.Close("a.txt")

	// idle so the Debug ticker output can be observed
	if testutils.IsSingleTesting() {
		time.Sleep(100 * time.Second)
	}
}
// TestNewOpenFileCache_OverSize pushes 100 x 128 MiB files into a 1 GiB
// cache to observe head-eviction when the byte capacity is exceeded.
func TestNewOpenFileCache_OverSize(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.SetCapacity(1 << 30)
	cache.Debug()
	for i := 0; i < 100; i++ {
		cache.Put("a"+types.String(i)+".txt", caches.NewOpenFile(nil, nil, nil, 0, 128<<20))
	}

	// idle so the Debug ticker output can be observed
	if testutils.IsSingleTesting() {
		time.Sleep(100 * time.Second)
	}
}
// TestNewOpenFileCache_CloseAll checks that CloseAll empties the cache after
// a few Put/Get operations.
func TestNewOpenFileCache_CloseAll(t *testing.T) {
	cache, err := caches.NewOpenFileCache(1024)
	if err != nil {
		t.Fatal(err)
	}
	cache.Debug()
	cache.Put("a.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Put("b.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Put("c.txt", caches.NewOpenFile(nil, nil, nil, 0, 1))
	cache.Get("b.txt")
	cache.Get("d.txt")
	cache.CloseAll()

	// idle long enough to see at least one Debug tick after CloseAll
	if testutils.IsSingleTesting() {
		time.Sleep(6 * time.Second)
	}
}

View File

@@ -0,0 +1,106 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/fasttime"
"github.com/TeaOSLab/EdgeNode/internal/utils/linkedlist"
)
// OpenFilePool pools open handles for a single file path; handles travel
// through a buffered channel so Get/Put are non-blocking.
type OpenFilePool struct {
	c        chan *OpenFile                  // buffered pool of open handles
	linkItem *linkedlist.Item[*OpenFilePool] // position in OpenFileCache's eviction list
	filename string                          // the file path this pool serves
	version  int64                           // version stamp; files with a different stamp are rejected
	isClosed bool                            // set by SetClosing/Close; pool stops serving afterwards
	usedSize int64                           // total bytes of the pooled files
}
// NewOpenFilePool creates a pool for one file path. The version stamp is
// initialized to the current time in milliseconds so files opened before
// the pool existed can be rejected.
func NewOpenFilePool(filename string) *OpenFilePool {
	var p = &OpenFilePool{
		filename: filename,
		c:        make(chan *OpenFile, 1024),
		version:  fasttime.Now().UnixMilli(),
	}
	p.linkItem = linkedlist.NewItem[*OpenFilePool](p)
	return p
}
// Filename returns the file path this pool serves.
func (this *OpenFilePool) Filename() string {
	return this.filename
}
// Get takes one pooled file, non-blocking. consumed reports whether a file
// was taken off the channel (even if it then failed to rewind and was
// closed), and consumedSize is that file's size for accounting.
// NOTE(review): usedSize and isClosed are accessed without synchronization
// here — presumably protected by OpenFileCache's lock or tolerated; confirm.
func (this *OpenFilePool) Get() (resultFile *OpenFile, consumed bool, consumedSize int64) {
	// a closed pool serves nothing
	if this.isClosed {
		return nil, false, 0
	}

	select {
	case file := <-this.c:
		if file != nil {
			this.usedSize -= file.size

			// rewind so the handle reads from the start; close on failure
			err := file.SeekStart()
			if err != nil {
				_ = file.Close()
				return nil, true, file.size
			}
			file.version = this.version
			return file, true, file.size
		}
		return nil, false, 0
	default:
		// pool empty
		return nil, false, 0
	}
}
// Put offers a file to the pool and reports whether it was stored. Files are
// closed and rejected when the pool is closing, when their version stamp
// differs from the pool's, or when the channel buffer is full.
func (this *OpenFilePool) Put(file *OpenFile) bool {
	// a closing pool accepts no new files
	if this.isClosed {
		_ = file.Close()
		return false
	}

	// reject files from a different version of the underlying file
	if this.version > 0 && file.version > 0 && file.version != this.version {
		_ = file.Close()
		return false
	}

	// add to the pool, non-blocking
	select {
	case this.c <- file:
		this.usedSize += file.size
		return true
	default:
		// buffer full: close the surplus handle
		_ = file.Close()
		return false
	}
}
// Len returns the number of files currently pooled.
func (this *OpenFilePool) Len() int {
	return len(this.c)
}
// TotalSize returns the total bytes of the pooled files.
func (this *OpenFilePool) TotalSize() int64 {
	return this.usedSize
}
// SetClosing marks the pool as closing so Get/Put stop serving; the actual
// handle closing is done later by Close, outside the cache's lock.
func (this *OpenFilePool) SetClosing() {
	this.isClosed = true
}
// Close marks the pool closed and drains the channel, closing every pooled
// handle that is immediately available.
func (this *OpenFilePool) Close() {
	this.isClosed = true

	for {
		select {
		case file := <-this.c:
			_ = file.Close()
		default:
			// channel drained
			return
		}
	}
}

View File

@@ -0,0 +1,46 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/rands"
"sync"
"testing"
)
// TestOpenFilePool_Get checks the basic Get/Put cycle: an empty pool yields
// nothing, a stored file is returned once, then the pool is empty again.
func TestOpenFilePool_Get(t *testing.T) {
	var pool = caches.NewOpenFilePool("a")
	t.Log(pool.Filename())
	t.Log(pool.Get())
	t.Log(pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1)))
	t.Log(pool.Get())
	t.Log(pool.Get())
}
// TestOpenFilePool_Close checks that Close drains a pool holding files
// (nil-fp fakes; Close on them is tolerated for this test).
func TestOpenFilePool_Close(t *testing.T) {
	var pool = caches.NewOpenFilePool("a")
	pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
	pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
	pool.Close()
}
// TestOpenFilePool_Concurrent hammers one pool with 1000 goroutines doing
// randomized Put/Get; intended to be run with -race.
func TestOpenFilePool_Concurrent(t *testing.T) {
	var pool = caches.NewOpenFilePool("a")
	var concurrent = 1000
	var wg = &sync.WaitGroup{}
	wg.Add(concurrent)
	for i := 0; i < concurrent; i++ {
		go func() {
			defer wg.Done()

			if rands.Int(0, 1) == 1 {
				pool.Put(caches.NewOpenFile(nil, nil, nil, 0, 1))
			}
			if rands.Int(0, 1) == 0 {
				pool.Get()
			}
		}()
	}
	wg.Wait()
}

View File

@@ -0,0 +1,270 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches
import (
"bytes"
"encoding/json"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"strconv"
)
// PartialRanges tracks which byte ranges of a partially cached response
// body have been downloaded.
type PartialRanges struct {
	Version    int        `json:"version"`    // serialization format version
	Ranges     [][2]int64 `json:"ranges"`     // sorted, merged inclusive [begin, end] ranges
	BodySize   int64      `json:"bodySize"`   // total body length in bytes
	ContentMD5 string     `json:"contentMD5"` // content MD5 checksum
}
// NewPartialRanges creates an empty PartialRanges with the current format
// version (2).
// NOTE(review): the expiresAt parameter is never used — presumably kept for
// backward compatibility with older call sites; confirm before removing.
func NewPartialRanges(expiresAt int64) *PartialRanges {
	return &PartialRanges{
		Ranges:  [][2]int64{},
		Version: 2,
	}
}
// NewPartialRangesFromData parses ranges from the line-based text format
// produced by String(): "v:" (version), "b:" (body size), "r:begin,end"
// (one range per line) and "m:" (Content-MD5) prefixed lines.
// A final line without a trailing '\n' is ignored; this appears intentional
// (it guards against torn/truncated writes) — confirm before changing.
func NewPartialRangesFromData(data []byte) (*PartialRanges, error) {
	var rs = NewPartialRanges(0)

	for {
		var index = bytes.IndexRune(data, '\n')
		if index < 0 {
			break
		}
		var line = data[:index]

		var colonIndex = bytes.IndexRune(line, ':')
		if colonIndex > 0 {
			switch string(line[:colonIndex]) {
			case "v": // format version
				rs.Version = types.Int(line[colonIndex+1:])
			case "b": // total body length
				rs.BodySize = types.Int64(line[colonIndex+1:])
			case "r": // one "begin,end" range
				var commaIndex = bytes.IndexRune(line, ',')
				if commaIndex > 0 {
					rs.Ranges = append(rs.Ranges, [2]int64{types.Int64(line[colonIndex+1 : commaIndex]), types.Int64(line[commaIndex+1:])})
				}
			case "m": // Content-MD5
				rs.ContentMD5 = string(line[colonIndex+1:])
			}
		}
		data = data[index+1:]
		if len(data) == 0 {
			break
		}
	}

	return rs, nil
}
// NewPartialRangesFromJSON parses ranges from the legacy JSON format.
// The Version field is forced to 0 afterwards to mark data loaded from the
// old format, regardless of any version present in the JSON.
func NewPartialRangesFromJSON(data []byte) (*PartialRanges, error) {
	var rs = NewPartialRanges(0)
	// Unmarshal into the struct itself, not into the pointer variable:
	// passing &rs (a **PartialRanges) would let a JSON `null` overwrite rs
	// with nil and panic on the rs.Version assignment below.
	err := json.Unmarshal(data, rs)
	if err != nil {
		return nil, err
	}
	rs.Version = 0
	return rs, nil
}
// NewPartialRangesFromFile loads range information for path, preferring any
// not-yet-flushed data in SharedPartialRangesQueue over the on-disk file.
// Empty data yields a fresh, empty PartialRanges.
func NewPartialRangesFromFile(path string) (*PartialRanges, error) {
	data, err := SharedPartialRangesQueue.Get(path)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 {
		return NewPartialRanges(0), nil
	}

	// legacy JSON format
	if data[0] == '{' {
		return NewPartialRangesFromJSON(data)
	}

	// current line-based format
	return NewPartialRangesFromData(data)
}
// Add records the inclusive range [begin, end], inserting it in sorted
// order and merging it with any overlapping or adjacent stored ranges.
// Reversed arguments are swapped first.
func (this *PartialRanges) Add(begin int64, end int64) {
	if begin > end {
		begin, end = end, begin
	}

	var nr = [2]int64{begin, end}
	var count = len(this.Ranges)
	if count == 0 {
		this.Ranges = [][2]int64{nr}
		return
	}

	// insert before the first stored range that sorts after nr
	var index = -1
	for i, r := range this.Ranges {
		if r[0] > begin || (r[0] == begin && r[1] >= end) {
			index = i
			this.Ranges = append(this.Ranges, [2]int64{})
			copy(this.Ranges[index+1:], this.Ranges[index:])
			this.Ranges[index] = nr
			break
		}
	}
	if index == -1 {
		// nr sorts after every stored range: append at the end
		index = count
		this.Ranges = append(this.Ranges, nr)
	}

	// coalesce with neighbors
	this.merge(index)
}
// Contains reports whether the inclusive interval [begin, end] is fully
// covered by a single stored range.
func (this *PartialRanges) Contains(begin int64, end int64) bool {
	for _, stored := range this.Ranges {
		if stored[0] <= begin && end <= stored[1] {
			return true
		}
	}
	return false
}
// Nearest returns a readable range starting exactly at begin, clipped so it
// does not extend past the stored range that covers begin.
// Note the strict r2[1] > begin comparison: a position equal to a stored
// range's (inclusive) end is not matched — presumably intentional so reads
// never start on the last covered byte; confirm against callers.
func (this *PartialRanges) Nearest(begin int64, end int64) (r [2]int64, ok bool) {
	if len(this.Ranges) == 0 {
		return
	}
	for _, r2 := range this.Ranges {
		if r2[0] <= begin && r2[1] > begin {
			r = [2]int64{begin, this.min(end, r2[1])}
			ok = true
			return
		}
	}
	return
}
// FindRangeAtPosition returns the stored range covering the given position,
// clipped to start at the position itself. The stored range's end bound is
// treated exclusively here (position must be strictly below it).
func (this *PartialRanges) FindRangeAtPosition(position int64) (r rangeutils.Range, ok bool) {
	if position < 0 {
		return
	}
	for _, stored := range this.Ranges {
		if stored[0] <= position && position < stored[1] {
			return [2]int64{position, stored[1]}, true
		}
	}
	return
}
// String serializes the ranges into the line-based text format:
//
//	v:<version>
//	b:<bodySize>
//	m:<contentMD5>   (only when set)
//	r:<begin>,<end>  (one line per range)
//
// A bytes.Buffer is used instead of repeated string concatenation to avoid
// quadratic copying when many ranges are present.
func (this *PartialRanges) String() string {
	var b bytes.Buffer
	b.WriteString("v:")
	b.WriteString(strconv.Itoa(this.Version)) // version
	b.WriteString("\nb:")
	b.WriteString(this.formatInt64(this.BodySize)) // bodySize
	b.WriteByte('\n')
	if len(this.ContentMD5) > 0 {
		b.WriteString("m:") // Content-MD5
		b.WriteString(this.ContentMD5)
		b.WriteByte('\n')
	}
	for _, r := range this.Ranges {
		b.WriteString("r:") // range
		b.WriteString(this.formatInt64(r[0]))
		b.WriteByte(',')
		b.WriteString(this.formatInt64(r[1]))
		b.WriteByte('\n')
	}
	return b.String()
}
// Bytes serializes the ranges into the line-based text format as bytes.
func (this *PartialRanges) Bytes() []byte {
	return []byte(this.String())
}

// WriteToFile queues the serialized ranges for asynchronous persistence to
// path. The returned error is always nil; the actual disk write happens
// later in SharedPartialRangesQueue's dump goroutine.
func (this *PartialRanges) WriteToFile(path string) error {
	SharedPartialRangesQueue.Put(path, this.Bytes())
	return nil
}
// Max returns the largest covered position (the end of the last stored
// range), or 0 when nothing has been recorded.
func (this *PartialRanges) Max() int64 {
	var count = len(this.Ranges)
	if count == 0 {
		return 0
	}
	return this.Ranges[count-1][1]
}
// Reset discards all recorded ranges.
func (this *PartialRanges) Reset() {
	this.Ranges = [][2]int64{}
}

// IsCompleted reports whether the whole body has been downloaded, i.e. all
// ranges have collapsed into the single range [0, BodySize-1].
func (this *PartialRanges) IsCompleted() bool {
	return len(this.Ranges) == 1 && this.Ranges[0][0] == 0 && this.Ranges[0][1] == this.BodySize-1
}
// merge coalesces the range at index with overlapping or adjacent
// neighbors, scanning toward lower indexes first, then toward higher ones.
// Two inclusive ranges are merged when their combined widths reach the span
// they would jointly cover minus one
// (w1+w2 >= max(ends)-min(begins)-1), i.e. when they overlap or touch.
func (this *PartialRanges) merge(index int) {
	// merge into earlier neighbors
	var lastIndex = index
	for i := index; i >= 1; i-- {
		var curr = this.Ranges[i]
		var prev = this.Ranges[i-1]

		var w1 = this.w(curr)
		var w2 = this.w(prev)
		if w1+w2 >= this.max(curr[1], prev[1])-this.min(curr[0], prev[0])-1 {
			prev = [2]int64{this.min(curr[0], prev[0]), this.max(curr[1], prev[1])}
			this.Ranges[i-1] = prev
			this.Ranges = append(this.Ranges[:i], this.Ranges[i+1:]...)
			lastIndex = i - 1
		} else {
			break
		}
	}

	// merge with later neighbors
	index = lastIndex
	for index < len(this.Ranges)-1 {
		var curr = this.Ranges[index]
		var next = this.Ranges[index+1]
		var w1 = this.w(curr)
		var w2 = this.w(next)
		if w1+w2 >= this.max(curr[1], next[1])-this.min(curr[0], next[0])-1 {
			curr = [2]int64{this.min(curr[0], next[0]), this.max(curr[1], next[1])}
			this.Ranges = append(this.Ranges[:index], this.Ranges[index+1:]...)
			this.Ranges[index] = curr
		} else {
			break
		}
	}
}
// w returns the width of an inclusive range (end - begin).
func (this *PartialRanges) w(r [2]int64) int64 {
	return r[1] - r[0]
}

// min returns the smaller of two int64 values.
func (this *PartialRanges) min(n1 int64, n2 int64) int64 {
	if n1 <= n2 {
		return n1
	}
	return n2
}

// max returns the larger of two int64 values.
func (this *PartialRanges) max(n1 int64, n2 int64) int64 {
	if n1 >= n2 {
		return n1
	}
	return n2
}

// formatInt64 formats i as a base-10 decimal string.
func (this *PartialRanges) formatInt64(i int64) string {
	return strconv.FormatInt(i, 10)
}

View File

@@ -0,0 +1,144 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import (
teaconst "github.com/TeaOSLab/EdgeNode/internal/const"
"github.com/TeaOSLab/EdgeNode/internal/remotelogs"
"github.com/TeaOSLab/EdgeNode/internal/utils/fnv"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/TeaOSLab/EdgeNode/internal/utils/goman"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"sync"
)
// SharedPartialRangesQueue is the process-wide queue used to batch writes
// of partial-range metadata files.
var SharedPartialRangesQueue = NewPartialRangesQueue()

func init() {
	// the background dump goroutine only runs in the main process
	if !teaconst.IsMain {
		return
	}

	SharedPartialRangesQueue.Start()
}

// number of shards used to spread lock contention across keys
const partialRangesQueueSharding = 8

// PartialRangesQueue is a sharded, memory-buffered write queue for ranges
// files: Put stores the latest data in memory and the background Dump
// goroutine flushes it to disk.
type PartialRangesQueue struct {
	m  [partialRangesQueueSharding]map[string][]byte // { filename => data, ... }
	c  chan string                                   // filename1, ...
	mu [partialRangesQueueSharding]*sync.RWMutex
}
// NewPartialRangesQueue creates a new queue whose channel capacity scales
// with the amount of system memory (512 entries up to 8K entries).
func NewPartialRangesQueue() *PartialRangesQueue {
	var memGB = memutils.SystemMemoryGB()
	var queueSize int
	switch {
	case memGB > 16:
		queueSize = 8 << 10
	case memGB > 8:
		queueSize = 4 << 10
	case memGB > 4:
		queueSize = 2 << 10
	case memGB > 2:
		queueSize = 1 << 10
	default:
		queueSize = 512
	}

	var shardMaps = [partialRangesQueueSharding]map[string][]byte{}
	var shardLocks = [partialRangesQueueSharding]*sync.RWMutex{}
	for i := 0; i < partialRangesQueueSharding; i++ {
		shardMaps[i] = map[string][]byte{}
		shardLocks[i] = &sync.RWMutex{}
	}

	return &PartialRangesQueue{
		m:  shardMaps,
		mu: shardLocks,
		c:  make(chan string, queueSize),
	}
}
// Start launches the background goroutine that flushes queued data to disk.
func (this *PartialRangesQueue) Start() {
	goman.New(func() {
		this.Dump()
	})
}
// Put stores data for filename in the in-memory shard map and signals the
// dump goroutine. The channel send is deliberately blocking ("always wait
// to finish"), so Put applies back-pressure when the queue is full.
func (this *PartialRangesQueue) Put(filename string, data []byte) {
	var index = this.indexForKey(filename)
	this.mu[index].Lock()
	this.m[index][filename] = data
	this.mu[index].Unlock()

	// always wait to finish
	this.c <- filename
}
// Get returns the pending in-memory data for filename when present,
// falling back to reading the file from disk otherwise.
func (this *PartialRangesQueue) Get(filename string) ([]byte, error) {
	var index = this.indexForKey(filename)
	this.mu[index].RLock()
	data, ok := this.m[index][filename]
	this.mu[index].RUnlock()
	if ok {
		return data, nil
	}

	return fsutils.ReadFile(filename)
}
// Delete removes any pending in-memory data for filename.
// The on-disk file, if one exists, is not touched.
func (this *PartialRangesQueue) Delete(filename string) {
	var index = this.indexForKey(filename)
	this.mu[index].Lock()
	delete(this.m[index], filename)
	this.mu[index].Unlock()
}
// Dump receives filenames from the channel and writes their latest queued
// data to disk; it runs for the lifetime of the process (until the channel
// is closed). Duplicate signals for an already-flushed filename are skipped
// because the map entry has been deleted by the first flush.
func (this *PartialRangesQueue) Dump() {
	for filename := range this.c {
		var index = this.indexForKey(filename)
		this.mu[index].Lock()
		data, ok := this.m[index][filename]
		if ok {
			delete(this.m[index], filename)
		}
		this.mu[index].Unlock()

		if !ok || len(data) == 0 {
			continue
		}

		// best-effort write: failures are logged, not retried
		err := fsutils.WriteFile(filename, data, 0666)
		if err != nil {
			remotelogs.Println("PARTIAL_RANGES_QUEUE", "write file '"+filename+"' failed: "+err.Error())
		}
	}
}
// Len returns the number of pending files summed across all shards.
func (this *PartialRangesQueue) Len() int {
	var total = 0
	for i := range this.m {
		this.mu[i].RLock()
		total += len(this.m[i])
		this.mu[i].RUnlock()
	}
	return total
}
// indexForKey maps a filename to its shard index via FNV hashing.
func (this *PartialRangesQueue) indexForKey(filename string) int {
	return int(fnv.HashString(filename) % partialRangesQueueSharding)
}

View File

@@ -0,0 +1,31 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches_test
import (
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"testing"
)
// TestNewPartialRangesQueue verifies Put/Get/Delete bookkeeping of the
// queue. Start is never called here, so entries stay in the shard maps and
// Len reflects exactly what was Put and not yet Deleted.
func TestNewPartialRangesQueue(t *testing.T) {
	var a = assert.NewAssertion(t)

	var queue = caches.NewPartialRangesQueue()
	queue.Put("a", []byte{1, 2, 3})
	t.Log("add 'a':", queue.Len())
	t.Log(queue.Get("a"))
	a.IsTrue(queue.Len() == 1)

	queue.Put("a", nil)
	t.Log("add 'a':", queue.Len())
	a.IsTrue(queue.Len() == 1)

	queue.Put("b", nil)
	t.Log("add 'b':", queue.Len())
	a.IsTrue(queue.Len() == 2)

	queue.Delete("a")
	t.Log("delete 'a':", queue.Len())
	a.IsTrue(queue.Len() == 1)
}

View File

@@ -0,0 +1,239 @@
// Copyright 2022 Liuxiangchao iwind.liu@gmail.com. All rights reserved.
package caches_test
import (
"crypto/md5"
"encoding/base64"
"encoding/json"
"github.com/TeaOSLab/EdgeNode/internal/caches"
"github.com/iwind/TeaGo/assert"
"github.com/iwind/TeaGo/logs"
"testing"
"time"
)
// TestNewPartialRanges exercises Add with overlapping ranges in mixed order.
func TestNewPartialRanges(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(50, 300)
	r.Add(30, 80)
	r.Add(30, 100)
	r.Add(30, 400)
	r.Add(1000, 10000)
	r.Add(200, 1000)
	r.Add(200, 10040)
	logs.PrintAsJSON(r.Ranges, t)
	t.Log("max:", r.Max())
}

// TestNewPartialRanges1 asserts fully overlapping adds collapse into one range.
func TestNewPartialRanges1(t *testing.T) {
	var a = assert.NewAssertion(t)

	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(1, 101)
	r.Add(1, 102)
	r.Add(2, 103)
	r.Add(200, 300)
	r.Add(1, 1000)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
	a.IsTrue(len(rs) == 1)
	if len(rs) == 1 {
		a.IsTrue(rs[0][0] == 1)
		a.IsTrue(rs[0][1] == 1000)
	}
}

// TestNewPartialRanges2 adds ranges in ascending position order.
func TestNewPartialRanges2(t *testing.T) {
	// low -> high
	var r = caches.NewPartialRanges(0)
	r.Add(1, 100)
	r.Add(1, 101)
	r.Add(1, 102)
	r.Add(2, 103)
	r.Add(200, 300)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(250, 400)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
}

// TestNewPartialRanges3 adds ranges in descending position order.
func TestNewPartialRanges3(t *testing.T) {
	// high -> low
	var r = caches.NewPartialRanges(0)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(200, 300)
	r.Add(250, 400)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
}

// TestNewPartialRanges4 checks merging of adjacent (touching) ranges.
func TestNewPartialRanges4(t *testing.T) {
	// nearby
	var r = caches.NewPartialRanges(0)
	r.Add(301, 302)
	r.Add(303, 304)
	r.Add(305, 306)
	r.Add(417, 417)
	r.Add(410, 415)
	r.Add(400, 409)
	var rs = r.Ranges
	logs.PrintAsJSON(rs, t)
	t.Log(r.Contains(400, 416))
}

// TestNewPartialRanges5 adds many overlapping sliding ranges.
func TestNewPartialRanges5(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	for j := 0; j < 1000; j++ {
		r.Add(int64(j), int64(j+100))
	}
	logs.PrintAsJSON(r.Ranges, t)
}
// TestNewPartialRanges_Nearest probes Nearest against contiguous and gapped
// range sets (results are logged, not asserted).
func TestNewPartialRanges_Nearest(t *testing.T) {
	{
		// nearby
		var r = caches.NewPartialRanges(0)
		r.Add(301, 400)
		r.Add(401, 500)
		r.Add(501, 600)
		t.Log(r.Nearest(100, 200))
		t.Log(r.Nearest(300, 350))
		t.Log(r.Nearest(302, 350))
	}

	{
		// nearby
		var r = caches.NewPartialRanges(0)
		r.Add(301, 400)
		r.Add(450, 500)
		r.Add(550, 600)
		t.Log(r.Nearest(100, 200))
		t.Log(r.Nearest(300, 350))
		t.Log(r.Nearest(302, 350))
		t.Log(r.Nearest(302, 440))
		t.Log(r.Nearest(302, 1000))
	}
}

// TestNewPartialRanges_Large_Range round-trips a multi-terabyte range
// through the text serialization.
func TestNewPartialRanges_Large_Range(t *testing.T) {
	var a = assert.NewAssertion(t)

	var largeSize int64 = 10000000000000
	t.Log(largeSize/1024/1024/1024, "G")
	var r = caches.NewPartialRanges(0)
	r.Add(1, largeSize)
	var s = r.String()
	t.Log(s)

	r2, err := caches.NewPartialRangesFromData([]byte(s))
	if err != nil {
		t.Fatal(err)
	}
	a.IsTrue(largeSize == r2.Ranges[0][1])
	logs.PrintAsJSON(r, t)
}
// TestPartialRanges_Encode_JSON measures the legacy JSON encoding size/time.
func TestPartialRanges_Encode_JSON(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}
	var before = time.Now()
	data, err := json.Marshal(r)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log(len(data))
}

// TestPartialRanges_Encode_String round-trips the text encoding including a
// Content-MD5 field.
func TestPartialRanges_Encode_String(t *testing.T) {
	var r = caches.NewPartialRanges(0)
	r.BodySize = 1024
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}
	var sum = md5.Sum([]byte("123456"))
	r.ContentMD5 = base64.StdEncoding.EncodeToString(sum[:])
	var before = time.Now()
	var data = r.String()
	t.Log(data)
	t.Log(time.Since(before).Seconds()*1000, "ms")
	t.Log(len(data))

	r2, err := caches.NewPartialRangesFromData([]byte(data))
	if err != nil {
		t.Fatal(err)
	}
	logs.PrintAsJSON(r2, t)
}

// TestPartialRanges_Version checks version detection for data with and
// without a "v:" line, and for legacy JSON input.
func TestPartialRanges_Version(t *testing.T) {
	{
		ranges, err := caches.NewPartialRangesFromData([]byte(`e:1668928495
r:0,1048576
r:1140260864,1140295164`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}
	{
		ranges, err := caches.NewPartialRangesFromData([]byte(`e:1668928495
r:0,1048576
r:1140260864,1140295164
v:0
`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}
	{
		ranges, err := caches.NewPartialRangesFromJSON([]byte(`{}`))
		if err != nil {
			t.Fatal(err)
		}
		t.Log("version:", ranges.Version)
	}
}
// BenchmarkNewPartialRanges benchmarks repeated Add calls with overlap.
func BenchmarkNewPartialRanges(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var r = caches.NewPartialRanges(0)
		for j := 0; j < 1000; j++ {
			r.Add(int64(j), int64(j+100))
		}
	}
}

// BenchmarkPartialRanges_String benchmarks the text serialization.
func BenchmarkPartialRanges_String(b *testing.B) {
	var r = caches.NewPartialRanges(0)
	r.BodySize = 1024
	for i := 0; i < 10; i++ {
		r.Ranges = append(r.Ranges, [2]int64{int64(i * 100), int64(i*100 + 100)})
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = r.String()
	}
}

View File

@@ -0,0 +1,52 @@
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"io"
)
// ReaderFunc is the callback invoked for every chunk read into the caller's
// buffer; n is the number of valid bytes at the front of the buffer.
// Returning goNext == false stops the read loop; a non-nil error aborts it.
type ReaderFunc func(n int) (goNext bool, err error)

// Reader is the interface implemented by cache content readers.
type Reader interface {
	// Init initializes the reader (parses metadata etc.).
	Init() error

	// TypeName returns the storage type name.
	TypeName() string

	// ExpiresAt returns the expiration timestamp.
	ExpiresAt() int64

	// Status returns the cached HTTP status code.
	Status() int

	// LastModified returns the last modification timestamp.
	LastModified() int64

	// ReadHeader streams the header section through buf via callback.
	ReadHeader(buf []byte, callback ReaderFunc) error

	// ReadBody streams the body section through buf via callback.
	ReadBody(buf []byte, callback ReaderFunc) error

	// Read implements the io.Reader interface.
	Read(buf []byte) (int, error)

	// ReadBodyRange streams a sub-range of the body through buf via callback.
	ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error

	// HeaderSize returns the header section size in bytes.
	HeaderSize() int64

	// BodySize returns the body section size in bytes.
	BodySize() int64

	// ContainsRange reports whether the reader holds the given content range.
	ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool)

	// SetNextReader sets a follow-up content reader.
	SetNextReader(nextReader io.ReadCloser)

	// Close closes the reader.
	Close() error
}

View File

@@ -0,0 +1,14 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
package caches
import "io"
// BaseReader provides the shared next-reader plumbing embedded by concrete
// cache readers.
type BaseReader struct {
	nextReader io.ReadCloser
}

// SetNextReader sets the follow-up content reader that continues after this
// reader's own content is exhausted.
func (this *BaseReader) SetNextReader(nextReader io.ReadCloser) {
	this.nextReader = nextReader
}

View File

@@ -0,0 +1,428 @@
package caches
import (
"encoding/binary"
"errors"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
"os"
)
// FileReader reads a cached entry (meta, header and body sections) from a
// regular file.
type FileReader struct {
	BaseReader

	fp            *fsutils.File  // underlying file handle
	openFile      *OpenFile      // reused cached open-file entry, if any
	openFileCache *OpenFileCache // optional cache for open file handles

	meta   []byte // raw meta section
	header []byte // in-memory header bytes (small headers only)

	expiresAt    int64 // expiration timestamp parsed from meta
	status       int   // cached HTTP status parsed from meta
	headerOffset int64 // file offset where the header section starts
	headerSize   int
	bodySize     int64
	bodyOffset   int64 // file offset where the body section starts

	isClosed bool
}

// NewFileReader creates a reader over an already-opened cache file.
func NewFileReader(fp *fsutils.File) *FileReader {
	return &FileReader{fp: fp}
}
// Init parses the meta section, discarding (deleting) the file on failure.
func (this *FileReader) Init() error {
	return this.InitAutoDiscard(true)
}

// InitAutoDiscard parses the meta section: expiration, status, URL length,
// header and body offsets/sizes. When autoDiscard is true, a file that
// fails validation is closed and removed from disk. Small headers
// (<= 512 bytes) are prefetched into memory when an open-file cache is used.
func (this *FileReader) InitAutoDiscard(autoDiscard bool) error {
	if this.openFile != nil {
		// reuse meta/header captured by a previous reader of this file
		this.meta = this.openFile.meta
		this.header = this.openFile.header
	}

	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}

	var buf = this.meta
	if len(buf) == 0 {
		buf = make([]byte, SizeMeta)
		ok, err := this.readToBuff(this.fp, buf)
		if err != nil {
			return err
		}
		if !ok {
			return ErrNotFound
		}
		this.meta = buf
	}

	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))

	var status = types.Int(string(buf[OffsetStatus : OffsetStatus+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status

	// URL
	var urlLength = binary.BigEndian.Uint32(buf[OffsetURLLength : OffsetURLLength+SizeURLLength])

	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[OffsetHeaderLength : OffsetHeaderLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): returns with isOk still false — with autoDiscard on,
		// a zero header size discards the file; confirm this is intended.
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)

	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)

	var bodySize = int(binary.BigEndian.Uint64(buf[OffsetBodyLength : OffsetBodyLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)

	// prefetch small headers into memory when the open-file cache is in use
	if this.openFileCache != nil && len(this.header) == 0 {
		if headerSize > 0 && headerSize <= 512 {
			this.header = make([]byte, headerSize)
			_, err := this.fp.Seek(this.headerOffset, io.SeekStart)
			if err != nil {
				return err
			}
			_, err = this.readToBuff(this.fp, this.header)
			if err != nil {
				return err
			}
		}
	}

	isOk = true
	return nil
}
// TypeName returns the storage type name ("disk").
func (this *FileReader) TypeName() string {
	return "disk"
}

// ExpiresAt returns the expiration timestamp parsed from the meta section.
func (this *FileReader) ExpiresAt() int64 {
	return this.expiresAt
}

// Status returns the cached HTTP status code.
func (this *FileReader) Status() int {
	return this.status
}

// LastModified returns the file's modification time as a unix timestamp,
// or 0 when stat fails.
func (this *FileReader) LastModified() int64 {
	stat, err := this.fp.Stat()
	if err != nil {
		return 0
	}
	return stat.ModTime().Unix()
}

// HeaderSize returns the header section size in bytes.
func (this *FileReader) HeaderSize() int64 {
	return int64(this.headerSize)
}

// BodySize returns the body section size in bytes.
func (this *FileReader) BodySize() int64 {
	return this.bodySize
}
// ReadHeader streams the header section through buf, invoking callback for
// each chunk. When the header was prefetched and fits into buf it is served
// from memory. On return the file position is left at the body offset.
// A read failure (other than a callback error) discards the cache file.
func (this *FileReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	// serve from the in-memory header cache when possible
	if len(this.header) > 0 && len(buf) >= len(this.header) {
		copy(buf, this.header)
		_, err := callback(len(this.header))
		if err != nil {
			return err
		}

		// seek to the body offset
		_, err = this.fp.Seek(this.bodyOffset, io.SeekStart)
		if err != nil {
			return err
		}

		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	_, err := this.fp.Seek(this.headerOffset, io.SeekStart)
	if err != nil {
		return err
	}

	var headerSize = this.headerSize

	for {
		n, err := this.fp.Read(buf)
		if n > 0 {
			if n < headerSize {
				// more header bytes remain after this chunk
				goNext, e := callback(n)
				if e != nil {
					isOk = true
					return e
				}
				if !goNext {
					break
				}
				headerSize -= n
			} else {
				// chunk contains the header tail (and possibly body bytes);
				// only headerSize bytes are reported to the callback
				_, e := callback(headerSize)
				if e != nil {
					isOk = true
					return e
				}
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	// seek to the body offset
	_, err = this.fp.Seek(this.bodyOffset, io.SeekStart)
	if err != nil {
		return err
	}

	return nil
}
// ReadBody streams the whole body section through buf, invoking callback
// for each chunk, until EOF or until the callback stops the loop.
// A read failure (other than a callback error) discards the cache file.
func (this *FileReader) ReadBody(buf []byte, callback ReaderFunc) error {
	if this.bodySize == 0 {
		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	var offset = this.bodyOffset

	// seek to the start of the body section
	_, err := this.fp.Seek(offset, io.SeekStart)
	if err != nil {
		return err
	}

	for {
		n, err := this.fp.Read(buf)
		if n > 0 {
			goNext, e := callback(n)
			if e != nil {
				isOk = true
				return e
			}
			if !goNext {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true
	return nil
}
// Read implements io.Reader over the underlying file from its current
// position; any non-EOF error discards (deletes) the cache file.
// NOTE(review): Read does not clamp at the end of the body section — it
// relies on the body being the last section of the file; confirm callers.
func (this *FileReader) Read(buf []byte) (n int, err error) {
	if this.bodySize == 0 {
		n = 0
		err = io.EOF
		return
	}

	n, err = this.fp.Read(buf)
	if err != nil && err != io.EOF {
		_ = this.discard()
	}

	return
}
// ReadBodyRange streams the byte range [start, end] of the body through buf.
// Negative start selects a suffix range; negative end means "from start to
// the end of the body".
// NOTE(review): for start < 0 the offset is computed from `end`
// (bodyOffset+bodySize+end), i.e. the caller passes the negative suffix
// length in `end` — confirm against callers before relying on this.
// After the file content, any configured nextReader is drained through the
// same callback. A read failure discards the cache file.
func (this *FileReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	// translate body-relative bounds to absolute file offsets
	var offset = start
	if start < 0 {
		offset = this.bodyOffset + this.bodySize + end
		end = this.bodyOffset + this.bodySize - 1
	} else if end < 0 {
		offset = this.bodyOffset + start
		end = this.bodyOffset + this.bodySize - 1
	} else {
		offset = this.bodyOffset + start
		end = this.bodyOffset + end
	}
	if offset < 0 || end < 0 || offset > end {
		isOk = true
		return ErrInvalidRange
	}
	_, err := this.fp.Seek(offset, io.SeekStart)
	if err != nil {
		return err
	}

	for {
		var n int
		n, err = this.fp.Read(buf)
		if n > 0 {
			var n2 = int(end-offset) + 1 // bytes still wanted
			if n2 <= n {
				// final chunk: report only the wanted prefix
				_, e := callback(n2)
				if e != nil {
					isOk = true
					return e
				}
				break
			} else {
				goNext, e := callback(n)
				if e != nil {
					isOk = true
					return e
				}
				if !goNext {
					break
				}
			}
			offset += int64(n)
			if offset > end {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true

	// drain the follow-up reader, if any
	if this.nextReader != nil {
		defer func() {
			_ = this.nextReader.Close()
		}()
		for {
			var n int
			n, err = this.nextReader.Read(buf)
			if n > 0 {
				goNext, writeErr := callback(n)
				if writeErr != nil {
					return writeErr
				}
				if !goNext {
					break
				}
			}
			if err != nil {
				if err != io.EOF {
					return err
				}
				break
			}
		}
	}

	return nil
}
// ContainsRange reports whether this reader holds the given content range.
// A complete cache file always contains the full body, so the range is
// returned unchanged.
func (this *FileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// FP returns the raw underlying *os.File handle.
func (this *FileReader) FP() *os.File {
	return this.fp.Raw()
}
// Close releases the reader. When an open-file cache is configured the file
// handle is handed back to the cache (kept open for reuse) instead of being
// closed. Safe to call more than once.
func (this *FileReader) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true

	if this.openFileCache != nil {
		if this.openFile != nil {
			this.openFileCache.Put(this.fp.Name(), this.openFile)
		} else {
			// copy the meta so the cached entry does not alias this.meta
			var cacheMeta = make([]byte, len(this.meta))
			copy(cacheMeta, this.meta)
			this.openFileCache.Put(this.fp.Name(), NewOpenFile(this.fp.Raw(), cacheMeta, this.header, this.LastModified(), this.bodySize))
		}
		return nil
	}
	return this.fp.Close()
}
// readToBuff fills buf from fp in a single Read, reporting ok only when the
// buffer was filled completely.
func (this *FileReader) readToBuff(fp *fsutils.File, buf []byte) (ok bool, err error) {
	var n int
	n, err = fp.Read(buf)
	if err != nil {
		return false, err
	}
	return n == len(buf), nil
}
// discard closes the reader, evicts the file from the open-file cache and
// deletes the cache file from disk; used when the entry is corrupt.
func (this *FileReader) discard() error {
	_ = this.fp.Close()
	this.isClosed = true

	// evict from the open-file cache
	if this.openFileCache != nil {
		this.openFileCache.Close(this.fp.Name())
	}

	// remove the file
	return fsutils.Remove(this.fp.Name())
}

View File

@@ -0,0 +1,18 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !plus
package caches
import (
"errors"
"io"
)
// MMAPFileReader is the non-plus build stub: it behaves exactly like the
// embedded FileReader and provides no real mmap support.
type MMAPFileReader struct {
	FileReader
}

// CopyBodyTo is not implemented in the non-plus build.
func (this *MMAPFileReader) CopyBodyTo(writer io.Writer) (int, error) {
	// stub
	return 0, errors.New("not implemented")
}

View File

@@ -0,0 +1,22 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus && darwin
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
)
// IsValidForMMAPSize reports whether a file of the given size should be
// read via mmap on darwin: at least 1 GB of memory must be available, total
// mapped bytes must stay below 1/4 of available memory
// (availableGB << 28 bytes), and size must be in (4 KiB, maxMMAPFileSize).
func IsValidForMMAPSize(size int64) bool {
	var availableGB = int64(memutils.AvailableMemoryGB())
	if availableGB < 1 {
		return false
	}
	if mmap.TotalMMAPFileSize() > (availableGB << 28) /** 1/4 availableGB **/ {
		return false
	}
	return size > (4<<10) && size < maxMMAPFileSize
}

View File

@@ -0,0 +1,22 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus && !darwin
package caches
import (
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
)
// IsValidForMMAPSize reports whether a file of the given size should be
// read via mmap on non-darwin platforms: at least 1 GB of memory must be
// available, total mapped bytes must stay below 1/8 of available memory
// (availableGB << 27 bytes), and size must be in (256 KiB, maxMMAPFileSize).
func IsValidForMMAPSize(size int64) bool {
	var availableGB = int64(memutils.AvailableMemoryGB())
	if availableGB < 1 {
		return false
	}
	if mmap.TotalMMAPFileSize() > (availableGB << 27) /** 1/8 availableGB **/ {
		return false
	}
	return size > (256<<10) && size < maxMMAPFileSize
}

View File

@@ -0,0 +1,421 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus
package caches
import (
"encoding/binary"
"errors"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
memutils "github.com/TeaOSLab/EdgeNode/internal/utils/mem"
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
"os"
"sync"
)
// maxMMAPFileSize is the largest file size eligible for mmap reading:
// at least 8 MiB, scaled up to 4 MiB per GB of system memory by init.
var maxMMAPFileSize int64 = 8 << 20

func init() {
	var estimatedSize = int64((memutils.SystemMemoryGB() * 4) << 20)
	if estimatedSize > maxMMAPFileSize {
		maxMMAPFileSize = estimatedSize
	}
}
// MMAPFileReader reads a cached entry through a shared memory mapping.
type MMAPFileReader struct {
	BaseReader

	rawReader *mmap.SharedMMAP // the shared mapping

	path       string
	modifiedAt int64 // mtime captured when the mapping was opened

	meta   []byte
	header []byte

	expiresAt    int64
	status       int
	headerOffset int64
	headerSize   int
	bodySize     int64
	bodyOffset   int64

	currentOffset int64 // logical read position within the mapping

	isClosed bool
	once     sync.Once // ensures the shared mapping is closed exactly once
}

// NewMMAPFileReaderFromPath maps the file at filename and wraps it in a reader.
func NewMMAPFileReaderFromPath(filename string) (*MMAPFileReader, error) {
	reader, err := mmap.OpenSharedMMAP(filename)
	if err != nil {
		return nil, err
	}
	return NewMMAPFileReader(reader)
}

// NewMMAPFileReader wraps an existing shared mapping. The returned error is
// always nil; the signature mirrors NewMMAPFileReaderFromPath.
func NewMMAPFileReader(mmapReader *mmap.SharedMMAP) (*MMAPFileReader, error) {
	return &MMAPFileReader{
		path:       mmapReader.Name(),
		rawReader:  mmapReader,
		modifiedAt: mmapReader.Stat().ModTime().Unix(),
	}, nil
}
// Init parses the meta section, discarding (deleting) the file on failure.
func (this *MMAPFileReader) Init() error {
	return this.InitAutoDiscard(true)
}

// InitAutoDiscard parses the meta section from the mapping: expiration,
// status, URL length, header and body offsets/sizes. When autoDiscard is
// true, a file that fails validation is unmapped and removed.
func (this *MMAPFileReader) InitAutoDiscard(autoDiscard bool) error {
	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}

	var buf = this.meta
	if len(buf) == 0 {
		buf = make([]byte, SizeMeta)
		ok, err := this.readNext(buf)
		if err != nil {
			return err
		}
		if !ok {
			return ErrNotFound
		}
		this.meta = buf
	}

	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))

	var status = types.Int(string(buf[OffsetStatus : OffsetStatus+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status

	// URL
	var urlLength = binary.BigEndian.Uint32(buf[OffsetURLLength : OffsetURLLength+SizeURLLength])

	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[OffsetHeaderLength : OffsetHeaderLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): returns with isOk still false — with autoDiscard on,
		// a zero header size discards the file; confirm this is intended.
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)

	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)

	var bodySize = int(binary.BigEndian.Uint64(buf[OffsetBodyLength : OffsetBodyLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)

	isOk = true
	return nil
}
// TypeName returns the storage type name ("disk").
func (this *MMAPFileReader) TypeName() string {
	return "disk"
}

// ExpiresAt returns the expiration timestamp parsed from the meta section.
func (this *MMAPFileReader) ExpiresAt() int64 {
	return this.expiresAt
}

// Status returns the cached HTTP status code.
func (this *MMAPFileReader) Status() int {
	return this.status
}

// LastModified returns the modification timestamp captured when the
// mapping was opened, falling back to stat-ing the path (0 on failure).
func (this *MMAPFileReader) LastModified() int64 {
	if this.modifiedAt > 0 {
		return this.modifiedAt
	}

	stat, err := os.Stat(this.path)
	if err != nil {
		return 0
	}
	return stat.ModTime().Unix()
}

// HeaderSize returns the header section size in bytes.
func (this *MMAPFileReader) HeaderSize() int64 {
	return int64(this.headerSize)
}

// BodySize returns the body section size in bytes.
func (this *MMAPFileReader) BodySize() int64 {
	return this.bodySize
}
// ReadHeader streams the header section through buf, invoking callback for
// each chunk. A prefetched header that fits into buf is served from memory;
// a buffer larger than the header triggers a single ReadAt fast path. On
// return the logical position is left at the body offset.
func (this *MMAPFileReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	// serve from the in-memory header cache when possible
	if len(this.header) > 0 && len(buf) >= len(this.header) {
		copy(buf, this.header)
		_, err := callback(len(this.header))
		if err != nil {
			return err
		}

		// move to the body offset
		this.moveTo(this.bodyOffset)

		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	this.moveTo(this.headerOffset)

	var headerSize = this.headerSize

	if len(buf) > headerSize {
		// fast path: the whole header fits into buf with one ReadAt
		n, err := this.rawReader.ReadAt(buf[:headerSize], this.headerOffset)
		if err != nil {
			if err != io.EOF {
				return err
			}
		}
		_, err = callback(n)
		if err != nil {
			isOk = true
			return err
		}
	} else {
		for {
			n, err := this.read(buf)
			if n > 0 {
				if n < headerSize {
					// more header bytes remain after this chunk
					goNext, e := callback(n)
					if e != nil {
						isOk = true
						return e
					}
					if !goNext {
						break
					}
					headerSize -= n
				} else {
					// only the remaining header bytes are reported
					_, e := callback(headerSize)
					if e != nil {
						isOk = true
						return e
					}
					break
				}
			}
			if err != nil {
				if err != io.EOF {
					return err
				}
				break
			}
		}
	}

	isOk = true

	// move to the body offset
	this.moveTo(this.bodyOffset)
	return nil
}
// ReadBody streams the whole body section through buf, invoking callback
// for each chunk, until EOF or until the callback stops the loop.
// A read failure (other than a callback error) discards the cache file.
func (this *MMAPFileReader) ReadBody(buf []byte, callback ReaderFunc) error {
	if this.bodySize == 0 {
		return nil
	}

	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	var offset = this.bodyOffset

	// start reading at the body section
	this.moveTo(offset)

	for {
		n, err := this.read(buf)
		if n > 0 {
			goNext, e := callback(n)
			if e != nil {
				isOk = true
				return e
			}
			if !goNext {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true
	return nil
}
// Read implements io.Reader over the mapping from the current logical
// position; any non-EOF error discards (deletes) the cache file.
// NOTE(review): like FileReader.Read, no clamp at the end of the body
// section — relies on the body being the last section; confirm callers.
func (this *MMAPFileReader) Read(buf []byte) (n int, err error) {
	if this.bodySize == 0 {
		n = 0
		err = io.EOF
		return
	}

	n, err = this.read(buf)
	if err != nil && err != io.EOF {
		_ = this.discard()
	}

	return
}
// ReadBodyRange streams the byte range [start, end] of the body through buf.
// Negative start selects a suffix range; negative end means "from start to
// the end of the body".
// NOTE(review): for start < 0 the offset is computed from `end`
// (bodyOffset+bodySize+end) — same convention as FileReader.ReadBodyRange;
// confirm against callers. A read failure discards the cache file.
func (this *MMAPFileReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	var isOk = false
	defer func() {
		if !isOk {
			_ = this.discard()
		}
	}()

	// translate body-relative bounds to absolute mapping offsets
	var offset = start
	if start < 0 {
		offset = this.bodyOffset + this.bodySize + end
		end = this.bodyOffset + this.bodySize - 1
	} else if end < 0 {
		offset = this.bodyOffset + start
		end = this.bodyOffset + this.bodySize - 1
	} else {
		offset = this.bodyOffset + start
		end = this.bodyOffset + end
	}
	if offset < 0 || end < 0 || offset > end {
		isOk = true
		return ErrInvalidRange
	}
	this.moveTo(offset)

	for {
		n, err := this.read(buf)
		if n > 0 {
			var n2 = int(end-offset) + 1 // bytes still wanted
			if n2 <= n {
				// final chunk: report only the wanted prefix
				_, e := callback(n2)
				if e != nil {
					isOk = true
					return e
				}
				break
			} else {
				goNext, e := callback(n)
				if e != nil {
					isOk = true
					return e
				}
				if !goNext {
					break
				}
			}
			offset += int64(n)
			if offset > end {
				break
			}
		}
		if err != nil {
			if err != io.EOF {
				return err
			}
			break
		}
	}

	isOk = true
	return nil
}
// ContainsRange reports whether this reader holds the given content range;
// a fully mapped cache file always does, so the range is returned as-is.
func (this *MMAPFileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// CopyBodyTo writes the body section directly from the mapping to writer.
func (this *MMAPFileReader) CopyBodyTo(writer io.Writer) (int, error) {
	return this.rawReader.Write(writer, int(this.bodyOffset))
}

// Close releases the shared mapping; safe to call multiple times (the
// sync.Once guards the actual unmap).
func (this *MMAPFileReader) Close() error {
	if this.isClosed {
		return nil
	}
	this.isClosed = true

	var err error
	this.once.Do(func() {
		err = this.rawReader.Close()
	})
	return err
}
// moveTo sets the logical read position within the mapping.
func (this *MMAPFileReader) moveTo(offset int64) {
	this.currentOffset = offset
}

// readNext fills buf from the current position, advancing it; ok reports
// whether the buffer was filled completely.
func (this *MMAPFileReader) readNext(buf []byte) (ok bool, err error) {
	n, err := this.rawReader.ReadAt(buf, this.currentOffset)
	if n > 0 {
		this.currentOffset += int64(n)
	}
	if err != nil {
		return false, err
	}
	ok = n == len(buf)
	return
}

// read fills p from the current position, advancing it by the bytes read.
func (this *MMAPFileReader) read(p []byte) (n int, err error) {
	n, err = this.rawReader.ReadAt(p, this.currentOffset)
	if n > 0 {
		this.currentOffset += int64(n)
	}
	return
}

// discard closes the mapping (once) and removes the cache file from disk;
// used when the entry is corrupt.
func (this *MMAPFileReader) discard() error {
	this.once.Do(func() {
		_ = this.rawReader.Close()
	})
	this.isClosed = true

	// remove the file
	return fsutils.Remove(this.path)
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,203 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
"github.com/iwind/TeaGo/Tea"
"os"
"testing"
)
// TestFileReader reads the header and body of a previously stored "my-key"
// entry; the test is skipped (via early return) when the cache file does
// not exist locally.
func TestFileReader(t *testing.T) {
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}

	_, path, _ := storage.keyPath("my-key")
	fp, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("file '" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	defer func() {
		_ = fp.Close()
	}()
	reader := NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
	err = reader.Init()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(reader.Status())

	buf := make([]byte, 10)
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestFileReader_ReadHeader reads the header of a fixed local cache file
// (developer machine path); skipped when the file does not exist.
func TestFileReader_ReadHeader(t *testing.T) {
	var path = "/Users/WorkSpace/EdgeProject/EdgeCache/p43/12/6b/126bbed90fc80f2bdfb19558948b0d49.cache"
	fp, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("'" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}
	defer func() {
		_ = fp.Close()
	}()

	var reader = NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
	err = reader.Init()
	if err != nil {
		if os.IsNotExist(err) {
			t.Log("file '" + path + "' not exists")
			return
		}
		t.Fatal(err)
	}

	var buf = make([]byte, 16*1024)
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		return
	})
	if err != nil {
		t.Fatal(err)
	}
}
func TestFileReader_Range(t *testing.T) {
var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
Id: 1,
IsOn: true,
Options: map[string]interface{}{
"dir": Tea.Root + "/caches",
},
})
defer storage.Stop()
err := storage.Init()
if err != nil {
t.Fatal(err)
}
/**writer, err := storage.Open("my-number", time.Now().Unix()+30*86400, 200, 6, 10)
if err != nil {
t.Fatal(err)
}
_, err = writer.Write([]byte("Header"))
if err != nil {
t.Fatal(err)
}
_, err = writer.Write([]byte("0123456789"))
if err != nil {
t.Fatal(err)
}
_ = writer.Close()**/
_, path, _ := storage.keyPath("my-number")
fp, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
t.Log("'" + path + "' not exists")
return
}
t.Fatal(err)
}
defer func() {
_ = fp.Close()
}()
reader := NewFileReader(fsutils.NewFile(fp, fsutils.FlagRead))
err = reader.Init()
if err != nil {
t.Fatal(err)
}
buf := make([]byte, 6)
{
err = reader.ReadBodyRange(buf, 0, 0, func(n int) (goNext bool, err error) {
t.Log("[0, 0]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
{
err = reader.ReadBodyRange(buf, 7, 7, func(n int) (goNext bool, err error) {
t.Log("[7, 7]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
{
err = reader.ReadBodyRange(buf, 0, 10, func(n int) (goNext bool, err error) {
t.Log("[0, 10]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
{
err = reader.ReadBodyRange(buf, 3, 5, func(n int) (goNext bool, err error) {
t.Log("[3, 5]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
{
err = reader.ReadBodyRange(buf, -1, -3, func(n int) (goNext bool, err error) {
t.Log("[, -3]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
{
err = reader.ReadBodyRange(buf, 3, -1, func(n int) (goNext bool, err error) {
t.Log("[3, ]", "body:", string(buf[:n]))
return true, nil
})
if err != nil {
t.Fatal(err)
}
}
}

View File

@@ -0,0 +1,210 @@
package caches
import (
"errors"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"io"
)
// MemoryReader reads a cached item that is held entirely in memory.
type MemoryReader struct {
	BaseReader

	item   *MemoryItem // the cached entry being read
	offset int         // current position within item.BodyValue (used by Read)
}

// NewMemoryReader creates a reader over the given in-memory cache item.
func NewMemoryReader(item *MemoryItem) *MemoryReader {
	return &MemoryReader{item: item}
}
// Init prepares the reader; an in-memory item needs no setup.
func (this *MemoryReader) Init() error {
	return nil
}

// TypeName returns the storage type identifier for this reader.
func (this *MemoryReader) TypeName() string {
	return "memory"
}

// ExpiresAt returns the unix timestamp at which the cached item expires.
func (this *MemoryReader) ExpiresAt() int64 {
	return this.item.ExpiresAt
}

// Status returns the cached response status code.
func (this *MemoryReader) Status() int {
	return this.item.Status
}

// LastModified returns the item's modification unix timestamp.
func (this *MemoryReader) LastModified() int64 {
	return this.item.ModifiedAt
}

// HeaderSize returns the size of the cached header data in bytes.
func (this *MemoryReader) HeaderSize() int64 {
	return int64(len(this.item.HeaderValue))
}

// BodySize returns the size of the cached body data in bytes.
func (this *MemoryReader) BodySize() int64 {
	return int64(len(this.item.BodyValue))
}
// ReadHeader streams the in-memory header to callback in buf-sized chunks.
// The final, short chunk (possibly zero bytes when the header is empty) is
// delivered without consulting goNext, matching the original contract.
func (this *MemoryReader) ReadHeader(buf []byte, callback ReaderFunc) error {
	bufLen := len(buf)
	if bufLen == 0 {
		return errors.New("using empty buffer")
	}
	data := this.item.HeaderValue
	for pos := 0; ; pos += bufLen {
		remaining := len(data) - pos
		if remaining < bufLen {
			// last partial chunk; goNext from the callback is ignored here
			copy(buf, data[pos:])
			if _, e := callback(remaining); e != nil {
				return e
			}
			return nil
		}
		copy(buf, data[pos:pos+bufLen])
		goNext, e := callback(bufLen)
		if e != nil {
			return e
		}
		if !goNext || pos+bufLen >= len(data) {
			return nil
		}
	}
}
// ReadBody streams the in-memory body to callback in buf-sized chunks.
// The final, short chunk (possibly zero bytes when the body is empty) is
// delivered without consulting goNext, matching the original contract.
func (this *MemoryReader) ReadBody(buf []byte, callback ReaderFunc) error {
	bufLen := len(buf)
	if bufLen == 0 {
		return errors.New("using empty buffer")
	}
	data := this.item.BodyValue
	for pos := 0; ; pos += bufLen {
		remaining := len(data) - pos
		if remaining < bufLen {
			// last partial chunk; goNext from the callback is ignored here
			copy(buf, data[pos:])
			if _, e := callback(remaining); e != nil {
				return e
			}
			return nil
		}
		copy(buf, data[pos:pos+bufLen])
		goNext, e := callback(bufLen)
		if e != nil {
			return e
		}
		if !goNext || pos+bufLen >= len(data) {
			return nil
		}
	}
}
// Read implements io.Reader semantics over the item's body, advancing an
// internal offset across calls. A read that reaches the end returns io.EOF
// together with the bytes copied.
func (this *MemoryReader) Read(buf []byte) (n int, err error) {
	bufLen := len(buf)
	if bufLen == 0 {
		return 0, errors.New("using empty buffer")
	}
	body := this.item.BodyValue
	remaining := len(body) - this.offset
	if remaining < bufLen {
		// NOTE(review): the offset is deliberately NOT advanced on this final
		// partial read (matches prior behavior); a repeated Read returns the
		// same tail with io.EOF again — confirm callers stop at EOF.
		n = copy(buf, body[this.offset:])
		return n, io.EOF
	}
	n = copy(buf, body[this.offset:this.offset+bufLen])
	this.offset += bufLen
	if this.offset >= len(body) {
		return n, io.EOF
	}
	return n, nil
}
// ReadBodyRange streams the requested byte range of the body to callback in
// buf-sized chunks. Two negative forms are supported, mirroring HTTP ranges:
// start < 0 means a suffix request where end holds the negative length
// (e.g. start=-1, end=-3 reads the last 3 bytes); end < 0 with start >= 0
// means "from start to the end of the body". An end past the body is clamped
// to the last byte. Invalid ranges yield ErrInvalidRange.
func (this *MemoryReader) ReadBodyRange(buf []byte, start int64, end int64, callback ReaderFunc) error {
	bodySize := int64(len(this.item.BodyValue))

	// normalize the requested range into [from, to] (inclusive)
	from, to := start, end
	if start < 0 {
		// suffix form: end carries the negative length from the tail
		from = bodySize + end
		to = bodySize - 1
	} else if end < 0 {
		to = bodySize - 1
	}
	if to >= bodySize {
		to = bodySize - 1
	}
	if from < 0 || to < 0 || from > to {
		return ErrInvalidRange
	}

	bufLen := len(buf)
	if bufLen == 0 {
		return errors.New("using empty buffer")
	}

	data := this.item.BodyValue[from : to+1]
	for pos := 0; ; pos += bufLen {
		remaining := len(data) - pos
		if remaining < bufLen {
			// last partial chunk; goNext from the callback is ignored here
			copy(buf, data[pos:])
			if _, e := callback(remaining); e != nil {
				return e
			}
			return nil
		}
		copy(buf, data[pos:pos+bufLen])
		goNext, e := callback(bufLen)
		if e != nil {
			return e
		}
		if !goNext || pos+bufLen >= len(data) {
			return nil
		}
	}
}
// ContainsRange reports whether the requested byte range is available.
// A memory item always holds the complete body, so every range is served
// back unchanged.
func (this *MemoryReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	return r, true
}

// Close releases resources; there is nothing to release for memory items.
func (this *MemoryReader) Close() error {
	return nil
}

View File

@@ -0,0 +1,105 @@
package caches
import "testing"
// TestMemoryReader_Header streams a 10-byte header through a 6-byte buffer
// and logs each chunk.
func TestMemoryReader_Header(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: []byte("0123456789"),
		BodyValue:   nil,
		// NOTE(review): 2000 is not a valid HTTP status — presumably an
		// arbitrary fixture value; confirm nothing validates it here.
		Status: 2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	err := reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("buf:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestMemoryReader_Body streams a 10-byte body through a 6-byte buffer
// and logs each chunk.
func TestMemoryReader_Body(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: nil,
		BodyValue:   []byte("0123456789"),
		Status:      2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	err := reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("buf:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestMemoryReader_Body_Range exercises ReadBodyRange with absolute ranges,
// an out-of-bounds end, and the negative (suffix / open-ended) forms.
func TestMemoryReader_Body_Range(t *testing.T) {
	item := &MemoryItem{
		ExpiresAt:   0,
		HeaderValue: nil,
		BodyValue:   []byte("0123456789"),
		Status:      2000,
	}
	reader := NewMemoryReader(item)
	buf := make([]byte, 6)
	var err error
	{
		err = reader.ReadBodyRange(buf, 0, 0, func(n int) (goNext bool, err error) {
			t.Log("[0, 0]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 7, 7, func(n int) (goNext bool, err error) {
			t.Log("[7, 7]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		// end beyond the body should be clamped to the last byte
		err = reader.ReadBodyRange(buf, 0, 10, func(n int) (goNext bool, err error) {
			t.Log("[0, 10]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		err = reader.ReadBodyRange(buf, 3, 5, func(n int) (goNext bool, err error) {
			t.Log("[3, 5]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		// negative start: suffix request, end carries the negative length
		err = reader.ReadBodyRange(buf, -1, -3, func(n int) (goNext bool, err error) {
			t.Log("[, -3]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
	{
		// negative end: open-ended range from 3 to the last byte
		err = reader.ReadBodyRange(buf, 3, -1, func(n int) (goNext bool, err error) {
			t.Log("[3, ]", "body:", string(buf[:n]))
			return true, nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}
}

View File

@@ -0,0 +1,152 @@
package caches
import (
"encoding/binary"
"errors"
"fmt"
fsutils "github.com/TeaOSLab/EdgeNode/internal/utils/fs"
rangeutils "github.com/TeaOSLab/EdgeNode/internal/utils/ranges"
"github.com/iwind/TeaGo/types"
"io"
)
// PartialFileReader reads a partially downloaded cache file. It wraps the
// regular FileReader and additionally tracks which byte ranges of the body
// are present, via a sidecar ranges file stored next to the cache file.
type PartialFileReader struct {
	*FileReader

	ranges    *PartialRanges // the set of body ranges cached so far
	rangePath string         // path of the sidecar file that persists the ranges
}

// NewPartialFileReader creates a partial reader over the given cache file;
// the ranges sidecar path is derived from the cache file's own path.
func NewPartialFileReader(fp *fsutils.File) *PartialFileReader {
	return &PartialFileReader{
		FileReader: NewFileReader(fp),
		rangePath:  PartialRangesFilePath(fp.Name()),
	}
}
// Init initializes the reader, discarding the on-disk files if the
// initialization fails.
func (this *PartialFileReader) Init() error {
	return this.InitAutoDiscard(true)
}

// InitAutoDiscard loads the ranges sidecar file and parses the cache file's
// binary metadata (expiry, status, URL length, header size, body size).
// When autoDiscard is true, any failure path deletes both the cache file
// and its sidecar via discard().
func (this *PartialFileReader) InitAutoDiscard(autoDiscard bool) error {
	// reuse metadata/header already cached by the open-file cache, if any
	if this.openFile != nil {
		this.meta = this.openFile.meta
		this.header = this.openFile.header
	}

	var isOk = false
	if autoDiscard {
		defer func() {
			if !isOk {
				_ = this.discard()
			}
		}()
	}

	// load the cached byte ranges from the sidecar file
	ranges, err := NewPartialRangesFromFile(this.rangePath)
	if err != nil {
		return fmt.Errorf("read ranges failed: %w", err)
	}
	this.ranges = ranges

	// read the fixed-size metadata block from the start of the file,
	// unless it was already provided by the open-file cache
	var buf = this.meta
	if len(buf) == 0 {
		buf = make([]byte, SizeMeta)
		ok, readErr := this.readToBuff(this.fp, buf)
		if readErr != nil {
			return readErr
		}
		if !ok {
			return ErrNotFound
		}
		this.meta = buf
	}
	this.expiresAt = int64(binary.BigEndian.Uint32(buf[:SizeExpiresAt]))

	// the status is stored as ASCII digits
	status := types.Int(string(buf[SizeExpiresAt : SizeExpiresAt+SizeStatus]))
	if status < 100 || status > 999 {
		return errors.New("invalid status")
	}
	this.status = status

	// URL length (the URL itself sits between the meta block and the header)
	var urlLength = binary.BigEndian.Uint32(buf[SizeExpiresAt+SizeStatus : SizeExpiresAt+SizeStatus+SizeURLLength])

	// header
	var headerSize = int(binary.BigEndian.Uint32(buf[SizeExpiresAt+SizeStatus+SizeURLLength : SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength]))
	if headerSize == 0 {
		// NOTE(review): isOk is still false here, so with autoDiscard=true the
		// files are discarded even though nil (success) is returned — the
		// bodySize==0 branch below sets isOk first. Confirm this asymmetry is
		// intended.
		return nil
	}
	this.headerSize = headerSize
	this.headerOffset = int64(SizeMeta) + int64(urlLength)

	// body
	this.bodyOffset = this.headerOffset + int64(headerSize)
	bodySize := int(binary.BigEndian.Uint64(buf[SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength : SizeExpiresAt+SizeStatus+SizeURLLength+SizeHeaderLength+SizeBodyLength]))
	if bodySize == 0 {
		isOk = true
		return nil
	}
	this.bodySize = int64(bodySize)

	// eagerly read small headers (<= 512 bytes) when the open-file cache is
	// active, so later requests can skip the seek+read
	if this.openFileCache != nil && len(this.header) == 0 {
		if headerSize > 0 && headerSize <= 512 {
			this.header = make([]byte, headerSize)
			_, err = this.fp.Seek(this.headerOffset, io.SeekStart)
			if err != nil {
				return err
			}
			_, err = this.readToBuff(this.fp, this.header)
			if err != nil {
				return err
			}
		}
	}

	isOk = true
	return nil
}
// ContainsRange reports whether this partially cached file can serve the
// requested byte range, returning the nearest available sub-range.
// The passed r has already been normalized by the caller.
func (this *PartialFileReader) ContainsRange(r rangeutils.Range) (r2 rangeutils.Range, ok bool) {
	r2, ok = this.ranges.Nearest(r.Start(), r.End())
	if ok && this.bodySize > 0 {
		// TODO: consider making this threshold configurable
		const minSpan = 128 << 10

		// refuse fragments smaller than minSpan (unless they fully satisfy the
		// request) to avoid generating a flood of tiny follow-up requests
		if r2.Length() < r.Length() && r2.Length() < minSpan {
			ok = false
		}
	}
	return
}

// MaxLength returns the maximum known content length: the full body size
// when recorded, otherwise one past the highest cached byte offset.
func (this *PartialFileReader) MaxLength() int64 {
	if this.bodySize > 0 {
		return this.bodySize
	}
	return this.ranges.Max() + 1
}

// Ranges returns the set of byte ranges cached so far.
func (this *PartialFileReader) Ranges() *PartialRanges {
	return this.ranges
}

// IsCompleted reports whether the whole body has been downloaded.
func (this *PartialFileReader) IsCompleted() bool {
	return this.ranges != nil && this.ranges.IsCompleted()
}

// discard removes the ranges sidecar (from both the queue and disk) and
// then discards the underlying cache file.
func (this *PartialFileReader) discard() error {
	SharedPartialRangesQueue.Delete(this.rangePath)
	_ = fsutils.Remove(this.rangePath)
	return this.FileReader.discard()
}

View File

@@ -0,0 +1,7 @@
package caches
// Stat holds aggregate statistics for a cache storage.
type Stat struct {
	Count     int   // number of cached items
	ValueSize int64 // total size of the stored values, in bytes
	Size      int64 // total space occupied, in bytes
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,9 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build !plus
package caches
// tryMMAPReader is the non-plus build stub: MMAP-backed readers are a
// plus-only feature, so this always returns (nil, nil) and the caller
// falls back to the regular file reader.
func (this *FileStorage) tryMMAPReader(isPartial bool, estimatedSize int64, path string) (Reader, error) {
	// stub
	return nil, nil
}

View File

@@ -0,0 +1,58 @@
// Copyright 2024 GoEdge CDN goedge.cdn@gmail.com. All rights reserved. Official site: https://goedge.cn .
//go:build plus
package caches
import (
"github.com/TeaOSLab/EdgeNode/internal/utils/mmap"
"os"
)
// tryMMAPReader attempts to open an MMAP-backed reader for the cache file
// at path, for complete (non-partial) entries within the valid MMAP size
// range and only when the storage options enable MMAP. Currently disabled
// via the early return below; callers then fall back to regular file reads.
func (this *FileStorage) tryMMAPReader(isPartial bool, estimatedSize int64, path string) (Reader, error) {
	// TODO: MMAP consumed too much CPU in practice; support is paused (the
	// code below is intentionally unreachable) until that is resolved.
	return nil, nil

	var options = this.options // copy
	if estimatedSize > 0 &&
		!isPartial &&
		(options != nil && options.EnableMMAP) &&
		IsValidForMMAPSize(estimatedSize) {
		// on any failure below, remove the (possibly corrupt) cache file
		var isOk bool
		defer func() {
			if !isOk {
				_ = this.removeCacheFile(path)
			}
		}()
		sharedMMAP, err := mmap.OpenSharedMMAP(path)
		if err != nil {
			if os.IsNotExist(err) {
				// a missing file is not a corruption: keep isOk true so the
				// deferred cleanup does not run
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		reader, err := NewMMAPFileReader(sharedMMAP)
		if err != nil {
			if os.IsNotExist(err) {
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		err = reader.Init()
		if err != nil {
			if os.IsNotExist(err) {
				isOk = true
				return nil, ErrNotFound
			}
			return nil, err
		}
		isOk = true
		return reader, nil
	}
	return nil, nil
}

View File

@@ -0,0 +1,707 @@
package caches
import (
"bytes"
"errors"
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
"github.com/TeaOSLab/EdgeNode/internal/utils"
"github.com/TeaOSLab/EdgeNode/internal/utils/testutils"
"github.com/iwind/TeaGo/Tea"
_ "github.com/iwind/TeaGo/bootstrap"
"github.com/iwind/TeaGo/logs"
"io"
"net/http"
"runtime"
"strconv"
"sync"
"testing"
"time"
)
// TestFileStorage_Init initializes a file storage, runs one purge pass and
// logs the SQLite list statistics. Only runs in single-test mode because it
// touches shared on-disk state.
func TestFileStorage_Init(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	//t.Log(storage.list.m)
	/**err = storage.Write("c", bytes.NewReader([]byte("i am c")), 4, "second")
	if err != nil {
		t.Fatal(err)
	}**/
	//logs.PrintAsJSON(storage.list.m, t)
	time.Sleep(2 * time.Second)
	storage.purgeLoop()
	t.Log(storage.list.(*SQLiteFileList).Stat(func(hash string) bool {
		return true
	}))
}

// TestFileStorage_OpenWriter writes a small header and body under "my-key"
// and logs the resulting sizes; later read tests consume this entry.
func TestFileStorage_OpenWriter(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	header := []byte("Header")
	body := []byte("This is Body")
	writer, err := storage.OpenWriter("my-key", time.Now().Unix()+86400, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
	_, err = writer.WriteHeader(header)
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.Write(body)
	if err != nil {
		t.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("header:", writer.HeaderSize(), "body:", writer.BodySize())
	t.Log("ok")
}
// TestFileStorage_OpenWriter_Partial opens a partial-content writer
// (isPartial=true) and writes a body fragment at offset 0 via WriteAt.
func TestFileStorage_OpenWriter_Partial(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   2,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	writer, err := storage.OpenWriter("my-key", time.Now().Unix()+86400, 200, -1, -1, -1, true)
	if err != nil {
		t.Fatal(err)
	}
	_, err = writer.WriteHeader([]byte("Content-Type:text/html; charset=utf-8"))
	if err != nil {
		t.Fatal(err)
	}
	err = writer.WriteAt(0, []byte("Hello, World"))
	if err != nil {
		t.Fatal(err)
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
}

// TestFileStorage_OpenWriter_HTTP caches a synthetic *http.Response under
// "my-http-response": headers are serialized as "Name:value\n" rows, the
// body is streamed in 1KB chunks. Consumed by TestFileStorage_Read_HTTP_Response.
func TestFileStorage_OpenWriter_HTTP(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	writer, err := storage.OpenWriter("my-http-response", time.Now().Unix()+86400, 200, -1, -1, -1, false)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(writer)
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Header: http.Header{
			"Content-Type":  []string{"text/html; charset=utf-8"},
			"Last-Modified": []string{"Wed, 06 Jan 2021 10:03:29 GMT"},
			"Server":        []string{"CDN-Server"},
		},
		Body: io.NopCloser(bytes.NewBuffer([]byte("THIS IS HTTP BODY"))),
	}
	for k, v := range resp.Header {
		for _, v1 := range v {
			_, err = writer.WriteHeader([]byte(k + ":" + v1 + "\n"))
			if err != nil {
				t.Fatal(err)
			}
		}
	}
	buf := make([]byte, 1024)
	for {
		// NOTE(review): the Write below reassigns the loop-local err, so a
		// Read error returned together with n>0 is masked for one iteration;
		// the subsequent Read terminates the loop anyway.
		n, err := resp.Body.Read(buf)
		if n > 0 {
			_, err = writer.Write(buf[:n])
			if err != nil {
				t.Fatal(err)
			}
		}
		if err != nil {
			break
		}
	}
	err = writer.Close()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("header:", writer.HeaderSize(), "body:", writer.BodySize())
	t.Log("ok")
}
// TestFileStorage_Concurrent_Open_DifferentFile opens 100 writers for 100
// distinct keys concurrently, with a deliberate 1s delay per writer to
// simulate slow writes. ErrFileIsWriting is a test failure here because
// each goroutine uses its own key.
func TestFileStorage_Concurrent_Open_DifferentFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	wg := sync.WaitGroup{}
	count := 100
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(i int) {
			defer wg.Done()
			writer, err := storage.OpenWriter("abc"+strconv.Itoa(i), time.Now().Unix()+3600, 200, -1, -1, -1, false)
			if err != nil {
				if errors.Is(err, ErrFileIsWriting) {
					t.Error(err)
					return
				}
				return
			}
			//t.Log(writer)
			_, err = writer.Write([]byte("Hello,World"))
			if err != nil {
				t.Error(err)
				return
			}
			// deliberately slow down the write
			time.Sleep(1 * time.Second)
			err = writer.Close()
			if err != nil {
				t.Error(err)
				return
			}
		}(i)
	}
	wg.Wait()
}

// TestFileStorage_Concurrent_Open_SameFile opens 100 writers for the SAME
// key ("abc0") concurrently. Unlike the different-file test, contention on
// one key is expected, yet ErrFileIsWriting is still treated as a failure
// here; other open errors are silently tolerated.
func TestFileStorage_Concurrent_Open_SameFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	defer func() {
		t.Log(time.Since(now).Seconds()*1000, "ms")
	}()
	wg := sync.WaitGroup{}
	count := 100
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(i int) {
			defer wg.Done()
			writer, err := storage.OpenWriter("abc"+strconv.Itoa(0), time.Now().Unix()+3600, 200, -1, -1, -1, false)
			if err != nil {
				if errors.Is(err, ErrFileIsWriting) {
					t.Error(err)
					return
				}
				return
			}
			//t.Log(writer)
			t.Log("writing")
			_, err = writer.Write([]byte("Hello,World"))
			if err != nil {
				t.Error(err)
				return
			}
			// deliberately slow down the write
			time.Sleep(time.Duration(1) * time.Second)
			err = writer.Close()
			if err != nil {
				t.Error(err)
				return
			}
		}(i)
	}
	wg.Wait()
}
// TestFileStorage_Read reads back the "my-key" entry written by
// TestFileStorage_OpenWriter, streaming header and body through a 6-byte
// buffer, and logs the elapsed time.
func TestFileStorage_Read(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	reader, err := storage.OpenReader("my-key", false, false)
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 6)
	t.Log(reader.Status())
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		t.Log("header:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}
// TestFileStorage_Read_HTTP_Response reads back the cached response written
// by TestFileStorage_OpenWriter_HTTP and parses the stored "Name:value\n"
// header rows out of the streamed header chunks.
func TestFileStorage_Read_HTTP_Response(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	reader, err := storage.OpenReader("my-http-response", false, false)
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 32)
	t.Log(reader.Status())
	headerBuf := []byte{}
	err = reader.ReadHeader(buf, func(n int) (goNext bool, err error) {
		// FIX: only the first n bytes of buf are valid for this chunk;
		// appending the whole buffer injected stale bytes from previous
		// chunks into the header parse below.
		headerBuf = append(headerBuf, buf[:n]...)
		// drain complete "Name:value\n" rows from the accumulated buffer
		for {
			nIndex := bytes.Index(headerBuf, []byte{'\n'})
			if nIndex >= 0 {
				row := headerBuf[:nIndex]
				spaceIndex := bytes.Index(row, []byte{':'})
				if spaceIndex <= 0 {
					return false, errors.New("invalid header")
				}
				t.Log("header row:", string(row[:spaceIndex]), string(row[spaceIndex+1:]))
				headerBuf = headerBuf[nIndex+1:]
			} else {
				break
			}
		}
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}
// TestFileStorage_Read_NotFound ensures that opening a reader for a missing
// key is reported as ErrNotFound rather than yielding a usable reader.
func TestFileStorage_Read_NotFound(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	buf := make([]byte, 6)
	reader, err := storage.OpenReader("my-key-10000", false, false)
	if err != nil {
		// FIX: use errors.Is (as the concurrency tests already do) so a
		// wrapped ErrNotFound is recognized too; also fixed the log typo
		// "not fund".
		if errors.Is(err, ErrNotFound) {
			t.Log("cache not found")
			return
		}
		t.Fatal(err)
	}
	err = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
		t.Log("body:", string(buf[:n]))
		return true, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(time.Since(now).Seconds()*1000, "ms")
}
// TestFileStorage_Delete removes the "my-key" entry created by the writer
// tests.
func TestFileStorage_Delete(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = storage.Delete("my-key")
	if err != nil {
		t.Fatal(err)
	}
	t.Log("ok")
}

// TestFileStorage_Stat prints aggregate cache statistics and the elapsed time.
func TestFileStorage_Stat(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	before := time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()
	stat, err := storage.Stat()
	if err != nil {
		t.Fatal(err)
	}
	logs.PrintAsJSON(stat, t)
}

// TestFileStorage_CleanAll wipes the whole cache and checks the list count
// before and after.
func TestFileStorage_CleanAll(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	before := time.Now()
	defer func() {
		t.Log(time.Since(before).Seconds()*1000, "ms")
	}()
	c, _ := storage.list.Count()
	t.Log("before:", c)
	err = storage.CleanAll()
	if err != nil {
		t.Fatal(err)
	}
	c, _ = storage.list.Count()
	t.Log("after:", c)
	t.Log("ok")
}

// TestFileStorage_Stop verifies that Stop can be called explicitly
// (the deferred Stop then runs a second time).
func TestFileStorage_Stop(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	storage.Stop()
}
// TestFileStorage_DecodeFile logs the on-disk path computed for "my-key".
func TestFileStorage_DecodeFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	_, path, _ := storage.keyPath("my-key")
	t.Log(path)
}

// TestFileStorage_RemoveCacheFile removes a hard-coded developer-machine
// cache file; note the storage is deliberately not initialized here.
func TestFileStorage_RemoveCacheFile(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(nil)
	defer storage.Stop()
	t.Log(storage.removeCacheFile("/Users/WorkSpace/EdgeProject/EdgeCache/p43/15/7e/157eba0dfc6dfb6fbbf20b1f9e584674.cache"))
}

// TestFileStorage_ScanGarbageCaches walks orphaned cache files and logs each
// path together with its ranges sidecar path.
func TestFileStorage_ScanGarbageCaches(t *testing.T) {
	if !testutils.IsSingleTesting() {
		return
	}
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:      43,
		Options: map[string]any{"dir": "/Users/WorkSpace/EdgeProject/EdgeCache"},
	})
	err := storage.Init()
	if err != nil {
		t.Fatal(err)
	}
	err = storage.ScanGarbageCaches(func(path string) error {
		t.Log(path, PartialRangesFilePath(path))
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// BenchmarkFileStorage_Read measures open+read+close of the "my-key" entry
// on a single CPU with a raised file-descriptor limit.
func BenchmarkFileStorage_Read(b *testing.B) {
	runtime.GOMAXPROCS(1)
	_ = utils.SetRLimit(1 << 20)
	var storage = NewFileStorage(&serverconfigs.HTTPCachePolicy{
		Id:   1,
		IsOn: true,
		Options: map[string]interface{}{
			"dir": Tea.Root + "/caches",
		},
	})
	defer storage.Stop()
	err := storage.Init()
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		reader, err := storage.OpenReader("my-key", false, false)
		if err != nil {
			b.Fatal(err)
		}
		buf := make([]byte, 1024)
		_ = reader.ReadBody(buf, func(n int) (goNext bool, err error) {
			return true, nil
		})
		_ = reader.Close()
	}
}

// BenchmarkFileStorage_KeyPath measures key-to-path hashing in isolation,
// using a bare FileStorage (no Init).
func BenchmarkFileStorage_KeyPath(b *testing.B) {
	runtime.GOMAXPROCS(1)
	var storage = &FileStorage{
		options: &serverconfigs.HTTPFileCacheStorage{},
		policy:  &serverconfigs.HTTPCachePolicy{Id: 1},
	}
	for i := 0; i < b.N; i++ {
		_, _, _ = storage.keyPath(strconv.Itoa(i))
	}
}

View File

@@ -0,0 +1,61 @@
package caches
import (
"github.com/TeaOSLab/EdgeCommon/pkg/serverconfigs"
)
// StorageInterface is implemented by every cache storage backend.
type StorageInterface interface {
	// Init initializes the storage.
	Init() error

	// OpenReader opens a reader for the cached item identified by key.
	// useStale allows serving expired content; isPartial requests a
	// partial-content (range) reader.
	OpenReader(key string, useStale bool, isPartial bool) (reader Reader, err error)

	// OpenWriter opens a writer waiting to receive cache data.
	// bodySize and maxSize may be -1 (unknown / unlimited).
	OpenWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64, maxSize int64, isPartial bool) (Writer, error)

	// OpenFlushWriter opens a writer that flushes data directly from another medium.
	OpenFlushWriter(key string, expiresAt int64, status int, headerSize int, bodySize int64) (Writer, error)

	// Delete removes the cache entry for key.
	Delete(key string) error

	// Stat returns aggregate cache statistics.
	Stat() (*Stat, error)

	// TotalDiskSize returns the disk space consumed, in bytes.
	TotalDiskSize() int64

	// TotalMemorySize returns the memory consumed, in bytes.
	TotalMemorySize() int64

	// CleanAll removes all cached entries.
	CleanAll() error

	// Purge removes the given keys in batch.
	// urlType is "file" or "dir".
	Purge(keys []string, urlType string) error

	// Stop stops this storage (and its cache policy).
	Stop()

	// Policy returns the policy backing this storage.
	Policy() *serverconfigs.HTTPCachePolicy

	// UpdatePolicy replaces the current policy.
	UpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy)

	// CanUpdatePolicy reports whether newPolicy can be applied in place.
	CanUpdatePolicy(newPolicy *serverconfigs.HTTPCachePolicy) bool

	// AddToList adds a cached item to the index list.
	AddToList(item *Item)

	// IgnoreKey marks key as not cacheable (for values up to maxSize).
	IgnoreKey(key string, maxSize int64)

	// CanSendfile reports whether sendfile is supported.
	CanSendfile() bool
}

Some files were not shown because too many files have changed in this diff Show More