Go 生产级敏感词过滤系统:DFA(Trie) / AC 可切换 + Normalize 预处理 + 双缓冲热更新 + etcd Watch + 灰度双版本

阅读时间:约 9 分钟

Go 生产级敏感词过滤系统:DFA(Trie) / AC 可切换 + Normalize 预处理 + 双缓冲热更新 + etcd Watch + 灰度双版本

覆盖能力:高性能(内存匹配)、可治理(scope/动作/白名单)、无感热更新(atomic 双缓冲)、etcd watch 秒级生效 + 对账兜底、灰度双版本(stable/canary)、可演进(DFA → AC、Basic Normalize → Strong Normalize)


目录

    1. 需求与核心结论
    2. 规则模型(JSON)
    3. 算法与工程分层
    4. 基础实现:DFA(Trie) + Basic Normalize(filter 包)
    5. 可选升级:AC 自动机(filter/ac.go)
    6. 双缓冲热更新:atomic 原子切换(hot 包)
    7. etcd Watch 热更:断线/压缩恢复 + 定时对账(etcdhot 包)
    8. 灰度双版本:stable/canary 按 userID 路由(hot/gray.go)
    9. 更强 Normalize:NFKC + 宽度折叠 + 去组合音标(filter/normalize_strong.go)
    10. main 集成示例

1. 需求与核心结论

1.1 生产环境你真正需要的组合

  • 匹配算法层:DFA(Trie) 与 AC 可替换(按词库规模、文本长度、P99 目标选择)
  • 工程治理层(必做):Normalize、白名单、双缓冲热更、版本化发布/回滚、watch + 对账兜底
  • 发布策略层(强烈建议):灰度双版本(stable/canary)按 userID 路由,稳定后 Promote

1.2 推荐升级路线

  1. DFA + Basic Normalize + 白名单 + atomic 热更 + etcd watch + 对账兜底
  2. 加灰度双版本(stable/canary)
  3. 词库很大/长文本/P99 苛刻:matcher 切 AC
  4. 对抗更强:NormalizeStrong +(必要时)替换的局部窗口定位

2. 规则模型(JSON)

{
  "version": "2026-01-27_001",
  "scopes": {
    "comment": {
      "terms": [
        {"word": "bad", "action": "REPLACE", "replace_with": "***"},
        {"word": "evil", "action": "BLOCK"},
        {"word": "admin", "action": "REVIEW"}
      ],
      "whitelist": ["badminton"]
    }
  }
}
  • action:BLOCK | REPLACE | TAG | REVIEW
  • scope:按业务域拆分(comment/nickname/chat/post…)建议必做
  • version:用于热更判断/对账/回滚

3. 算法与工程分层

3.1 三层结构

  • Matcher(算法层):DFA Trie 或 AC 自动机
  • Pipeline(策略层):Normalize → Match → 白名单覆盖 → 动作(拦截/替换/标记/审核)
  • Runtime(运行时层):双缓冲热更(后台构建 + atomic swap)+ watch/对账 + 灰度

4. 基础实现:DFA(Trie) + Basic Normalize(filter 包)

特点:最长匹配;动作输出;白名单覆盖(简化覆盖逻辑可直接用)。

package filter

import (
	"sort"
	"strings"
	"unicode"
	"unicode/utf8"
)

// Action is the moderation decision attached to a sensitive term.
type Action string

const (
	ActionBlock   Action = "BLOCK"   // reject the whole text
	ActionReplace Action = "REPLACE" // mask the hit with ReplaceWith
	ActionTag     Action = "TAG"     // allow, but attach a tag
	ActionReview  Action = "REVIEW"  // allow, but flag for manual review
)

// Term is a single sensitive word plus the action to take on a hit.
type Term struct {
	Word        string `json:"word"`
	Action      Action `json:"action"`
	ReplaceWith string `json:"replace_with"` // used for REPLACE; defaults to "***" at build time
}

// ScopeRules holds the term list and whitelist for one business scope.
type ScopeRules struct {
	Terms     []Term   `json:"terms"`
	Whitelist []string `json:"whitelist"` // whitelist hits override term hits they fully cover
}

// RuleSet is the versioned rule document: one ScopeRules per scope.
type RuleSet struct {
	Version string                `json:"version"` // used for hot-update change detection / rollback
	Scopes  map[string]ScopeRules `json:"scopes"`
}

// Hit is one match found on the normalized text.
type Hit struct {
	Start int
	End   int // [Start, End) on normalized rune index
	Term  Term
}

// Result is the outcome of matching one text within one scope.
type Result struct {
	Allowed    bool     // false when any BLOCK term hit
	OutText    string   // original text with REPLACE hits masked; "" when blocked
	Hits       []Hit    // hits that survived the whitelist override
	Tags       []string // words of TAG hits
	NeedReview bool     // true when any REVIEW term hit
	Version    string   // rule version the decision was made with
	Scope      string
}

// Matcher is the read-path interface; the hot-update layer swaps whole
// implementations atomically, so implementations are built once and then
// treated as read-only.
type Matcher interface {
	Match(scope string, text string) Result
	Version() string
}

// -------------------- Basic Normalize --------------------

// Normalize lower-cases s and strips whitespace plus a small set of
// separator characters commonly inserted to evade matching.
// idxMap maps each normalized rune index to the byte offset of the
// originating rune in s (sufficient for the common replacement cases).
func Normalize(s string) (norm string, idxMap []int) {
	var out strings.Builder
	out.Grow(len(s))
	idxMap = make([]int, 0, len(s))

	for off, ch := range s {
		ch = unicode.ToLower(ch)
		switch {
		case unicode.IsSpace(ch):
			continue
		case ch == '.' || ch == '_' || ch == '-' || ch == '*':
			// common insertion-evasion separators; tune per business tolerance
			continue
		}
		out.WriteRune(ch)
		idxMap = append(idxMap, off)
	}
	return out.String(), idxMap
}

// -------------------- DFA Trie --------------------

// node is one trie state; end is non-nil when a term terminates here.
type node struct {
	next map[rune]int
	end  *Term
}

// trie is a flat-array DFA trie; node 0 is the root.
type trie struct {
	nodes []node
}

// newTrie returns a trie containing only the root node.
func newTrie() *trie {
	return &trie{nodes: []node{{next: make(map[rune]int)}}}
}

// insert adds word to the trie and marks its final state with term.
func (t *trie) insert(word string, term Term) {
	state := 0
	for _, r := range word {
		if t.nodes[state].next == nil {
			t.nodes[state].next = make(map[rune]int)
		}
		child, exists := t.nodes[state].next[r]
		if !exists {
			t.nodes = append(t.nodes, node{next: make(map[rune]int)})
			child = len(t.nodes) - 1
			// index through t.nodes again: append may have relocated the
			// backing array, so no pointer is held across the append
			t.nodes[state].next[r] = child
		}
		state = child
	}
	end := term
	t.nodes[state].end = &end
}

// longestMatchAll scans s and reports the longest term match starting at
// each position; after a hit, scanning resumes just past the matched span.
func (t *trie) longestMatchAll(s string) []Hit {
	runes := []rune(s)
	out := make([]Hit, 0, 8)

	for start := 0; start < len(runes); start++ {
		state := 0
		end := -1
		var matched *Term

		// Walk as deep as the trie allows, remembering the farthest
		// terminal state seen (longest match wins).
		for pos := start; pos < len(runes); pos++ {
			next, ok := t.nodes[state].next[runes[pos]]
			if !ok {
				break
			}
			state = next
			if term := t.nodes[state].end; term != nil {
				end = pos + 1
				matched = term
			}
		}

		if matched != nil {
			out = append(out, Hit{Start: start, End: end, Term: *matched})
			start = end - 1 // skip over the matched span (loop ++ moves past it)
		}
	}
	return out
}

// -------------------- Engine(按 scope) --------------------

// Engine is an immutable per-version matcher: one term trie plus one
// whitelist trie per scope. Build once via NewEngine, then treat as
// read-only (the hot-update layer swaps whole engines).
type Engine struct {
	version string
	scopes  map[string]*scopeEngine
}

// scopeEngine holds the compiled tries for a single business scope.
type scopeEngine struct {
	trie      *trie // sensitive terms
	whiteTrie *trie // whitelist entries that override covered term hits
}

// NewEngine compiles rs into an Engine. Terms and whitelist words are
// normalized with Normalize before insertion; words that normalize to ""
// are skipped. The error result is currently always nil; it is kept for
// forward compatibility of the constructor signature.
func NewEngine(rs *RuleSet) (*Engine, error) {
	e := &Engine{
		version: rs.Version,
		scopes:  make(map[string]*scopeEngine, len(rs.Scopes)),
	}

	for scope, rules := range rs.Scopes {
		se := &scopeEngine{trie: newTrie(), whiteTrie: newTrie()}

		// Whitelist entries are matched on normalized form as well.
		for _, w := range rules.Whitelist {
			nw, _ := Normalize(w)
			if nw != "" {
				se.whiteTrie.insert(nw, Term{Word: nw, Action: ActionTag})
			}
		}
		// Sensitive terms; REPLACE terms get a default "***" mask.
		for _, term := range rules.Terms {
			nw, _ := Normalize(term.Word)
			if nw == "" {
				continue
			}
			term.Word = nw // term is a range copy; safe to mutate
			if term.Action == ActionReplace && term.ReplaceWith == "" {
				term.ReplaceWith = "***"
			}
			se.trie.insert(nw, term)
		}

		e.scopes[scope] = se
	}
	return e, nil
}

// Version reports the rule version this engine was built from.
func (e *Engine) Version() string { return e.version }

// Match runs the moderation pipeline for one scope:
// normalize -> trie longest-match -> whitelist override -> apply actions.
// Text in an unknown scope is allowed unchanged.
func (e *Engine) Match(scope string, text string) Result {
	se := e.scopes[scope]
	if se == nil {
		return Result{Allowed: true, OutText: text, Version: e.version, Scope: scope}
	}

	norm, idxMap := Normalize(text)
	if norm == "" {
		return Result{Allowed: true, OutText: text, Version: e.version, Scope: scope}
	}

	hits := se.trie.longestMatchAll(norm)
	if len(hits) == 0 {
		return Result{Allowed: true, OutText: text, Version: e.version, Scope: scope}
	}

	// Whitelist override (simplified): a term hit is dropped when some
	// whitelist hit fully covers its interval.
	whiteHits := se.whiteTrie.longestMatchAll(norm)
	covered := func(h Hit) bool {
		for _, wh := range whiteHits {
			if wh.Start <= h.Start && wh.End >= h.End {
				return true
			}
		}
		return false
	}

	type rep struct {
		l, r int
		with string
	}
	repls := make([]rep, 0, 4)

	resHits := make([]Hit, 0, len(hits))
	allowed := true
	needReview := false
	tags := make([]string, 0, 4)

	for _, h := range hits {
		if covered(h) {
			continue
		}
		resHits = append(resHits, h)

		// Map the normalized-rune interval back to original byte offsets.
		if h.Start >= len(idxMap) || h.End-1 >= len(idxMap) {
			continue
		}
		l := idxMap[h.Start]
		// idxMap holds the START byte of each original rune. The previous
		// code used idxMap[h.End-1]+1, which covers only one byte of the
		// final rune and splices invalid UTF-8 when it is multi-byte
		// (e.g. CJK). Add the full width of the last original rune.
		last := idxMap[h.End-1]
		_, sz := utf8.DecodeRuneInString(text[last:])
		r := last + sz

		switch h.Term.Action {
		case ActionBlock:
			allowed = false
		case ActionReplace:
			repls = append(repls, rep{l: l, r: r, with: h.Term.ReplaceWith})
		case ActionTag:
			tags = append(tags, h.Term.Word)
		case ActionReview:
			needReview = true
		}
	}

	// Apply replacements back-to-front so earlier offsets stay valid.
	out := text
	if len(repls) > 0 {
		sort.Slice(repls, func(i, j int) bool { return repls[i].l > repls[j].l })
		b := []byte(out)
		for _, rp := range repls {
			if rp.l < 0 || rp.r > len(b) || rp.l >= rp.r {
				continue
			}
			b = append(b[:rp.l], append([]byte(rp.with), b[rp.r:]...)...)
		}
		out = string(b)
	}

	if !allowed {
		out = ""
	}

	return Result{
		Allowed:    allowed,
		OutText:    out,
		Hits:       resHits,
		Tags:       tags,
		NeedReview: needReview,
		Version:    e.version,
		Scope:      scope,
	}
}

5. 可选升级:AC 自动机(filter/ac.go)

热更/灰度/runner 不变,仅替换 matcher 内部结构:insert + build(fail) + matchAll。

package filter

// acNode is one Aho-Corasick state: goto edges, a fail link, and the
// terms ending at this state (fail-link outputs are merged in by build).
type acNode struct {
	next   map[rune]int
	fail   int
	output []Term
}

// acAuto is a flat-array Aho-Corasick automaton; node 0 is the root.
type acAuto struct {
	nodes []acNode
}

// newAC returns an automaton containing only the root state.
func newAC() *acAuto {
	return &acAuto{nodes: []acNode{{next: make(map[rune]int), fail: 0}}}
}

// insert adds word to the goto trie and appends term to its final state.
func (a *acAuto) insert(word string, term Term) {
	state := 0
	for _, r := range word {
		if a.nodes[state].next == nil {
			a.nodes[state].next = make(map[rune]int)
		}
		child, exists := a.nodes[state].next[r]
		if !exists {
			a.nodes = append(a.nodes, acNode{next: make(map[rune]int)})
			child = len(a.nodes) - 1
			// re-index after the append: the backing array may have moved
			a.nodes[state].next[r] = child
		}
		state = child
	}
	a.nodes[state].output = append(a.nodes[state].output, term)
}

// build computes fail links and merges outputs using a BFS over the goto
// trie (standard Aho-Corasick construction). Call exactly once, after all
// inserts and before matchAll.
func (a *acAuto) build() {
	q := make([]int, 0, len(a.nodes))

	// Depth-1 nodes fail to the root.
	for _, nx := range a.nodes[0].next {
		a.nodes[nx].fail = 0
		q = append(q, nx)
	}

	for head := 0; head < len(q); head++ {
		v := q[head]
		for ch, to := range a.nodes[v].next {
			q = append(q, to)

			// Walk v's fail chain until some state has a goto edge on ch.
			f := a.nodes[v].fail
			for f != 0 {
				if nxt, ok := a.nodes[f].next[ch]; ok {
					f = nxt
					break
				}
				f = a.nodes[f].fail
			}
			// Fell through to the root: take the root's own edge on ch if
			// present. (The v != 0 guard is defensive — v is always depth
			// >= 1 here since only queued nodes are processed.)
			if f == 0 {
				if nxt, ok := a.nodes[0].next[ch]; ok && v != 0 {
					f = nxt
				}
			}

			a.nodes[to].fail = f
			// Inherit the fail target's outputs so matchAll can emit every
			// pattern ending at a state without chasing fail links.
			if len(a.nodes[f].output) > 0 {
				a.nodes[to].output = append(a.nodes[to].output, a.nodes[f].output...)
			}
		}
	}
}

// matchAll runs the automaton over the normalized text and returns every
// pattern occurrence (overlaps included), with Start/End as rune indices.
// Unlike the trie's longestMatchAll it neither deduplicates nor skips
// ahead; callers needing longest-match semantics must post-process.
func (a *acAuto) matchAll(norm string) []Hit {
	rs := []rune(norm)
	hits := make([]Hit, 0, 16)
	state := 0

	for i, ch := range rs {
		// Follow fail links until a state with a goto edge on ch (or root).
		for state != 0 {
			if _, ok := a.nodes[state].next[ch]; ok { break }
			state = a.nodes[state].fail
		}
		if nx, ok := a.nodes[state].next[ch]; ok {
			state = nx
		}
		// Emit every pattern ending here (outputs were merged along fail
		// links during build). Start is derived from the word's rune length.
		for _, t := range a.nodes[state].output {
			wlen := len([]rune(t.Word))
			start := i - wlen + 1
			if start >= 0 {
				hits = append(hits, Hit{Start: start, End: i + 1, Term: t})
			}
		}
	}
	return hits
}

6. 双缓冲热更新:atomic 原子切换(hot 包)

package hot

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"net/http"
	"os"
	"sync/atomic"
	"time"

	"yourmod/filter"
)

// Loader fetches a RuleSet from some source (file, HTTP, etcd, ...).
// raw is the undecoded payload, useful for auditing/logging.
type Loader interface {
	Load(ctx context.Context) (rs *filter.RuleSet, raw []byte, err error)
}

// FileLoader reads a RuleSet from a local JSON file.
type FileLoader struct{ Path string }

// Load implements Loader. The context is currently unused (os.ReadFile
// offers no cancellation hook).
func (l FileLoader) Load(ctx context.Context) (*filter.RuleSet, []byte, error) {
	raw, err := os.ReadFile(l.Path)
	if err != nil {
		return nil, nil, err
	}
	rs := &filter.RuleSet{}
	if err := json.Unmarshal(raw, rs); err != nil {
		return nil, nil, err
	}
	return rs, raw, nil
}

// HTTPLoader fetches a RuleSet JSON document over HTTP(S).
type HTTPLoader struct {
	URL     string
	Timeout time.Duration // per-request timeout; defaults to 3s
}

// Load implements Loader: GET l.URL, require a 2xx status, decode JSON.
func (l HTTPLoader) Load(ctx context.Context) (*filter.RuleSet, []byte, error) {
	to := l.Timeout
	if to <= 0 {
		to = 3 * time.Second
	}
	// Previously the request-construction error was silently discarded;
	// a malformed URL would surface only as a confusing nil-request Do.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, l.URL, nil)
	if err != nil {
		return nil, nil, err
	}
	resp, err := (&http.Client{Timeout: to}).Do(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		// Include the status line so operators can tell a 404 from a 500.
		return nil, nil, errors.New("http status not 2xx: " + resp.Status)
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, err
	}
	var rs filter.RuleSet
	if err := json.Unmarshal(b, &rs); err != nil {
		return nil, nil, err
	}
	return &rs, b, nil
}

// matcherBox wraps a Matcher so atomic.Value always stores one concrete
// type. Storing the interface directly makes atomic.Value.Store panic
// ("inconsistently typed value") the first time a different Matcher
// implementation is installed.
type matcherBox struct{ m filter.Matcher }

// HotMatcher is a lock-free read path over a hot-swappable Matcher:
// readers Load the current engine; updaters build a new engine in the
// background and Store it atomically (double-buffering).
type HotMatcher struct {
	cur atomic.Value // stores matcherBox
}

// NewHotMatcher returns a HotMatcher serving initial.
func NewHotMatcher(initial filter.Matcher) *HotMatcher {
	h := &HotMatcher{}
	h.cur.Store(matcherBox{m: initial})
	return h
}

// Match dispatches to the currently installed Matcher (no locks).
func (h *HotMatcher) Match(scope, text string) filter.Result {
	return h.cur.Load().(matcherBox).m.Match(scope, text)
}

// Version reports the currently installed rule version.
func (h *HotMatcher) Version() string {
	return h.cur.Load().(matcherBox).m.Version()
}

// UpdateOnce loads rules via ld and, when the version differs from the
// installed one, builds a fresh engine and swaps it in atomically.
// changed is false when the version is unchanged (no rebuild performed).
func (h *HotMatcher) UpdateOnce(ctx context.Context, ld Loader) (newVersion string, changed bool, err error) {
	rs, _, err := ld.Load(ctx)
	if err != nil {
		return "", false, err
	}
	old := h.cur.Load().(matcherBox).m
	if rs.Version != "" && rs.Version == old.Version() {
		return rs.Version, false, nil
	}
	eng, err := filter.NewEngine(rs)
	if err != nil {
		return "", false, err
	}
	h.cur.Store(matcherBox{m: eng})
	return eng.Version(), true, nil
}

// RunPoll periodically calls UpdateOnce until ctx is done; intended as a
// reconciliation fallback alongside an etcd watch. Load/build errors are
// swallowed and retried on the next tick. onChange (optional) fires with
// the new version after each successful swap.
func (h *HotMatcher) RunPoll(ctx context.Context, ld Loader, interval time.Duration, onChange func(ver string)) error {
	if interval <= 0 {
		interval = 5 * time.Second
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			ver, changed, err := h.UpdateOnce(ctx, ld)
			if err != nil {
				continue // best-effort: retry on the next tick
			}
			if changed && onChange != nil {
				onChange(ver)
			}
		}
	}
}

7. etcd Watch 热更:断线/压缩恢复 + 定时对账(etcdhot 包)

7.1 Key 设计(stable)

  • sensitive/current/<scope> = <stable_version>
  • sensitive/rules/<version> = <RuleSet JSON>
package etcdhot

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"yourmod/filter"
)

// Config describes the etcd key layout for one scope.
type Config struct {
	PrefixCurrent string        // e.g. "sensitive/current/"
	PrefixRules   string        // e.g. "sensitive/rules/"
	Scope         string        // e.g. "comment"
	Timeout       time.Duration // per-Get timeout; defaults to 2s
}

// currentKey is the pointer key holding the active version for the scope.
func (c Config) currentKey() string { return c.PrefixCurrent + c.Scope }

// rulesKey is the content key holding the RuleSet JSON for a version.
func (c Config) rulesKey(ver string) string { return c.PrefixRules + ver }

// EtcdRuleLoader loads rules through the two-key indirection:
// current/<scope> -> version, then rules/<version> -> RuleSet JSON.
type EtcdRuleLoader struct {
	cli *clientv3.Client
	cfg Config
}

// NewEtcdRuleLoader builds a loader, applying the default timeout.
func NewEtcdRuleLoader(cli *clientv3.Client, cfg Config) *EtcdRuleLoader {
	if cfg.Timeout <= 0 {
		cfg.Timeout = 2 * time.Second
	}
	return &EtcdRuleLoader{cli: cli, cfg: cfg}
}

// Load implements hot.Loader: resolve the version pointer first, then
// fetch the RuleSet body for that version. Both Gets run under a single
// timeout derived from cfg.Timeout. When the JSON carries no version
// field, the pointer value is backfilled so version comparison in the
// hot-update layer keeps working.
func (l *EtcdRuleLoader) Load(ctx context.Context) (*filter.RuleSet, []byte, error) {
	ctx, cancel := context.WithTimeout(ctx, l.cfg.Timeout)
	defer cancel()

	// Step 1: current/<scope> -> active version string.
	resp, err := l.cli.Get(ctx, l.cfg.currentKey())
	if err != nil { return nil, nil, err }
	if len(resp.Kvs) == 0 { return nil, nil, fmt.Errorf("current not found: %s", l.cfg.currentKey()) }

	ver := string(resp.Kvs[0].Value)
	if ver == "" { return nil, nil, errors.New("empty version") }

	// Step 2: rules/<version> -> RuleSet JSON payload.
	resp2, err := l.cli.Get(ctx, l.cfg.rulesKey(ver))
	if err != nil { return nil, nil, err }
	if len(resp2.Kvs) == 0 { return nil, nil, fmt.Errorf("ruleset not found: %s", l.cfg.rulesKey(ver)) }

	raw := resp2.Kvs[0].Value
	var rs filter.RuleSet
	if err := json.Unmarshal(raw, &rs); err != nil { return nil, nil, err }
	// Backfill from the pointer key when the document omits its version.
	if rs.Version == "" { rs.Version = ver }
	return &rs, raw, nil
}
package etcdhot

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"

	"yourmod/hot"
)

// WatchRunner keeps a HotMatcher in sync with etcd: it watches the
// current-version key for near-real-time updates and also reconciles on
// a timer as a safety net for missed events.
type WatchRunner struct {
	cli    *clientv3.Client
	loader *EtcdRuleLoader
	hm     *hot.HotMatcher

	reconcileInterval time.Duration // 30-60s recommended
}

// NewWatchRunner wires a runner with the default 30s reconcile interval.
func NewWatchRunner(cli *clientv3.Client, loader *EtcdRuleLoader, hm *hot.HotMatcher) *WatchRunner {
	return &WatchRunner{
		cli:               cli,
		loader:            loader,
		hm:                hm,
		reconcileInterval: 30 * time.Second,
	}
}

// SetReconcileInterval overrides the reconcile period; non-positive
// values are ignored.
func (w *WatchRunner) SetReconcileInterval(d time.Duration) {
	if d > 0 {
		w.reconcileInterval = d
	}
}

// Run drives the sync loop until ctx is cancelled:
//   - initial reconcile so the process starts from the latest rules;
//   - a watch on the current-version key for near-real-time updates;
//   - periodic reconciliation as a safety net for missed events;
//   - watch re-establishment on channel close, compaction, or error.
//
// NOTE(review): watches are (re)opened with WithRev(<current revision>),
// which may replay the event at that exact revision; UpdateOnce compares
// versions and is idempotent, so the replay appears harmless — confirm
// if that invariant ever changes. rev==0 means "watch from now".
func (w *WatchRunner) Run(ctx context.Context) error {
	// Align once at startup.
	if ver, changed, err := w.hm.UpdateOnce(ctx, w.loader); err == nil && changed {
		log.Printf("[rule] init sync version=%s", ver)
	}

	rev, err := w.getCurrentRevision(ctx)
	if err != nil {
		log.Printf("[rule] getCurrentRevision failed: %v", err)
		rev = 0
	}

	watchCh := w.cli.Watch(ctx, w.loader.cfg.currentKey(), clientv3.WithRev(rev))
	ticker := time.NewTicker(w.reconcileInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()

		case <-ticker.C:
			// Periodic reconciliation fallback.
			if ver, changed, err := w.hm.UpdateOnce(ctx, w.loader); err != nil {
				log.Printf("[rule] reconcile failed: %v", err)
			} else if changed {
				log.Printf("[rule] reconcile updated version=%s", ver)
			}

		case wr, ok := <-watchCh:
			if !ok {
				// Channel closed (e.g. connection loss): re-subscribe.
				rev2, _ := w.getCurrentRevision(ctx)
				watchCh = w.cli.Watch(ctx, w.loader.cfg.currentKey(), clientv3.WithRev(rev2))
				continue
			}
			if wr.Err() != nil {
				// Requested revision compacted away: restart from current.
				if wr.Err() == rpctypes.ErrCompacted {
					rev2, _ := w.getCurrentRevision(ctx)
					watchCh = w.cli.Watch(ctx, w.loader.cfg.currentKey(), clientv3.WithRev(rev2))
					continue
				}
				// Any other watch error: log and re-subscribe.
				log.Printf("[rule] watch err: %v", wr.Err())
				rev2, _ := w.getCurrentRevision(ctx)
				watchCh = w.cli.Watch(ctx, w.loader.cfg.currentKey(), clientv3.WithRev(rev2))
				continue
			}
			// Event received: pull the latest rules and swap.
			if ver, changed, err := w.hm.UpdateOnce(ctx, w.loader); err != nil {
				log.Printf("[rule] updateOnce failed: %v", err)
			} else if changed {
				log.Printf("[rule] watch updated version=%s", ver)
			}
		}
	}
}

// getCurrentRevision reads the cluster revision of the current-version
// key; used as the starting revision when (re)establishing a watch.
func (w *WatchRunner) getCurrentRevision(ctx context.Context) (int64, error) {
	getCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()
	resp, err := w.cli.Get(getCtx, w.loader.cfg.currentKey())
	if err != nil {
		return 0, err
	}
	return resp.Header.Revision, nil
}

8. 灰度双版本:stable/canary 按 userID 路由(hot/gray.go)

核心:同时持有 stable/canary 两套 matcher + canaryPercent。稳定后 Promote。

package hot

import (
	"hash/fnv"
	"sync/atomic"

	"yourmod/filter"
)

// grayState is the immutable snapshot held by GrayMatcher: both matcher
// versions plus the routing knobs. It is replaced wholesale on change.
type grayState struct {
	Stable        filter.Matcher
	Canary        filter.Matcher // nil when no canary is running
	CanaryPercent uint32 // 0..100
	Salt          string // mixed into the user hash to re-shuffle buckets
}

// GrayMatcher routes each user deterministically to stable or canary.
type GrayMatcher struct {
	st atomic.Value // store grayState
}

// NewGrayMatcher starts with only a stable matcher (canary disabled).
func NewGrayMatcher(stable filter.Matcher, salt string) *GrayMatcher {
	g := &GrayMatcher{}
	g.st.Store(grayState{Stable: stable, Canary: nil, CanaryPercent: 0, Salt: salt})
	return g
}

// SetCanary installs (or clears) the canary matcher.
//
// All mutators use a CompareAndSwap loop: the previous load-modify-Store
// pattern could silently drop a concurrent mutation (lost update) when
// two Set*/Promote calls raced. Note: CAS compares grayState with ==, so
// Matcher implementations must have comparable dynamic types (pointers
// such as *filter.Engine are fine).
func (g *GrayMatcher) SetCanary(canary filter.Matcher) {
	for {
		old := g.st.Load().(grayState)
		next := old
		next.Canary = canary
		if g.st.CompareAndSwap(old, next) {
			return
		}
	}
}

// SetCanaryPercent sets the canary traffic share, clamped to [0,100].
func (g *GrayMatcher) SetCanaryPercent(p uint32) {
	if p > 100 {
		p = 100
	}
	for {
		old := g.st.Load().(grayState)
		next := old
		next.CanaryPercent = p
		if g.st.CompareAndSwap(old, next) {
			return
		}
	}
}

// Promote makes the canary the new stable and turns the canary off.
// No-op when no canary is installed.
func (g *GrayMatcher) Promote() {
	for {
		old := g.st.Load().(grayState)
		if old.Canary == nil {
			return
		}
		next := grayState{
			Stable:        old.Canary,
			Canary:        nil,
			CanaryPercent: 0,
			Salt:          old.Salt,
		}
		if g.st.CompareAndSwap(old, next) {
			return
		}
	}
}

// pick chooses stable or canary for userID. Routing is deterministic:
// fnv32a(userID + salt) % 100 < CanaryPercent selects canary, so a given
// user stays on the same side until percent or salt changes.
func (g *GrayMatcher) pick(userID string) filter.Matcher {
	s := g.st.Load().(grayState)
	if s.Canary == nil || s.CanaryPercent == 0 {
		return s.Stable
	}

	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(userID))
	_, _ = hasher.Write([]byte(s.Salt))
	bucket := hasher.Sum32() % 100
	if bucket < s.CanaryPercent {
		return s.Canary
	}
	return s.Stable
}

// MatchWithUser runs Match on whichever version userID is routed to.
func (g *GrayMatcher) MatchWithUser(scope, userID, text string) filter.Result {
	return g.pick(userID).Match(scope, text)
}

8.1 灰度 Key 设计(etcd)

  • stable 指针:sensitive/current/<scope>
  • canary 指针:sensitive/current_canary/<scope>
  • 灰度比例:sensitive/canary_percent/<scope>
  • 规则内容:sensitive/rules/<version>

发布顺序建议:

  1. sensitive/rules/<ver>
  2. current_canary/<scope>=<ver>
  3. canary_percent/<scope>=10/30/50/...
  4. 全量验证后,把 current/<scope> 切到新版本,然后清空 canary

9. 更强 Normalize:NFKC + 宽度折叠 + 去组合音标 + 分隔符策略(filter/normalize_strong.go)

依赖:

  • golang.org/x/text/unicode/norm
  • golang.org/x/text/width
package filter

import (
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/unicode/norm"
	"golang.org/x/text/width"
)

// isIgnorableSep reports whether r is a separator the strong normalizer
// drops: any Unicode whitespace plus common evasion punctuation.
func isIgnorableSep(r rune) bool {
	if unicode.IsSpace(r) {
		return true
	}
	switch r {
	case '.', '_', '-', '*', '#', '@', '!', '~', '`', ',',
		// Fullwidth comma written as an escape: the original listing had
		// two visually identical commas, which reads as (and may compile
		// as) a duplicate case; U+FF0C is clearly intended alongside '。'.
		'\uff0c',
		'。', '·', '|', '/', '\\', ':', ';':
		return true
	default:
		return false
	}
}

// isCombiningMark reports whether r is a Unicode nonspacing combining
// mark (category Mn), e.g. accents left after decomposition.
func isCombiningMark(r rune) bool {
	return unicode.In(r, unicode.Mn)
}

// NormalizeStrong is the aggressive normalization used for detection:
// NFKC compatibility normalization -> full/half width folding ->
// per-rune lowercase, dropping combining marks and ignorable separators.
//
// posMap[i] is the byte offset of normRunes[i] inside the NFKC+folded
// intermediate string — NOT inside the original s. NFKC and width
// folding change lengths and positions, so for replacement targeting in
// the original text prefer a local-window search over this map.
func NormalizeStrong(s string) (normRunes []rune, posMap []int) {
	ns := norm.NFKC.String(s)
	ns = width.Fold.String(ns)

	normRunes = make([]rune, 0, len(ns))
	posMap = make([]int, 0, len(ns))

	for i := 0; i < len(ns); {
		r, size := utf8.DecodeRuneInString(ns[i:])
		ri := i // byte offset of this rune within ns
		i += size

		r = unicode.ToLower(r)
		if isCombiningMark(r) { continue }
		if isIgnorableSep(r) { continue }

		normRunes = append(normRunes, r)
		posMap = append(posMap, ri)
	}
	return
}

10. main 集成示例(etcd stable 热更 + 读路径无锁)

package main

import (
	"context"
	"log"
	"os/signal"
	"syscall"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"

	"yourmod/filter"
	"yourmod/hot"
	"yourmod/etcdhot"
)

// mustInitFromEtcd loads the current RuleSet from etcd and compiles it,
// panicking on any failure — startup must not proceed without rules.
func mustInitFromEtcd(cli *clientv3.Client, cfg etcdhot.Config) filter.Matcher {
	loader := etcdhot.NewEtcdRuleLoader(cli, cfg)
	rs, _, err := loader.Load(context.Background())
	if err != nil {
		panic(err)
	}
	eng, err := filter.NewEngine(rs)
	if err != nil {
		panic(err)
	}
	return eng
}

func main() {
	// Cancelled on SIGINT/SIGTERM for graceful shutdown.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 3 * time.Second,
	})
	if err != nil { panic(err) }
	defer cli.Close()

	cfg := etcdhot.Config{
		PrefixCurrent: "sensitive/current/",
		PrefixRules:   "sensitive/rules/",
		Scope:         "comment",
		Timeout:       2 * time.Second,
	}

	// Build the initial engine synchronously, then hand it to the
	// hot-swappable wrapper.
	initMatcher := mustInitFromEtcd(cli, cfg)
	hm := hot.NewHotMatcher(initMatcher)

	loader := etcdhot.NewEtcdRuleLoader(cli, cfg)
	runner := etcdhot.NewWatchRunner(cli, loader, hm)
	runner.SetReconcileInterval(30 * time.Second)

	// Background sync: etcd watch plus periodic reconciliation.
	go func() {
		if err := runner.Run(ctx); err != nil {
			log.Printf("[rule] runner exit: %v", err)
		}
	}()

	// Business call site: the read path takes no locks.
	r := hm.Match("comment", "This is b a d !!!")
	log.Printf("allowed=%v out=%q version=%s hits=%d review=%v",
		r.Allowed, r.OutText, r.Version, len(r.Hits), r.NeedReview)

	<-ctx.Done()
}