Implementing a resolver usually comes up when a service registry is involved: the server runs as multiple instances, and the client has to resolve the service name into a list of server addresses and pick one to connect to. In gRPC this resolution step is pluggable and can be customized.
- Note: gRPC runs over HTTP/2 and keeps connections long-lived, so in Kubernetes a plain Service may not balance traffic well (all requests stick to one pod). A workable scheme is a headless Service plus client-side IP selection, roughly like this:
// Look up all gRPC pod IPs through the headless Service's DNS name
grpcIps, _ := net.LookupIP("k8s-service")
// Run our own load-balancing algorithm over the pod IPs
targetIP := GrpcBalance(grpcIps) // pick a usable IP
client := NewGrpcClient(targetIP) // initialize the gRPC client
gRPC ships with three built-in resolvers: passthrough, manual, and dns. The sections below look at each of them.
passthrough mode: passthrough.go[2] is gRPC's global default resolver; it is used whenever the target address carries no scheme.
This mode is as simple as its name suggests: it does nothing during the resolve phase and just passes the address straight through as addrs to the underlying connection, so name resolution only happens when the real Dial takes place.
When connecting with serviceName:port as the address, serviceName resolves to the Service's cluster IP, and the single connection is balanced onto one pod at dial time. Since gRPC keeps that connection alive, this gives you neither service discovery nor per-request load balancing.
When the connected pod exits, the connection breaks and gRPC reconnects; dialing through serviceName again lands on a healthy pod.
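A minimal sketch of what that looks like (the Service name my-svc is a placeholder used only for illustration):
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// The target has no scheme, so the default passthrough resolver is used:
	// the string is handed to the dialer unchanged, one pod is picked when the
	// TCP connection is established, and every RPC reuses that connection.
	conn, err := grpc.Dial("my-svc:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
}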
manual mode: manual.go[3] is fully hand-managed and mainly exposes two methods, r.InitialState(s resolver.State) and r.UpdateState(s resolver.State). On its own it is clearly a poor fit for Kubernetes, because pod IPs change whenever a pod restarts, a node restarts, or the workload scales up or down. A sketch of its API follows below.
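A minimal sketch of the manual resolver (the "example" scheme and the hard-coded ports are made up for illustration):
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	// Build a manual resolver and seed it with a fixed address list.
	r := manual.NewBuilderWithScheme("example")
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ":50051"}, {Addr: ":50052"}}})

	// Hand the resolver to the ClientConn; the host part of the target is not looked up.
	conn, err := grpc.Dial("example:///ignored", grpc.WithResolvers(r), grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	// Whenever the backend list changes, you must push the new state by hand, e.g.:
	//   r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ":50053"}}})
}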
dns mode: dns_resolver.go performs a DNS lookup during the resolve phase, turning the host into IPs that are passed as addrs to the underlying connection.
With a target of dns:///serviceName:port, serviceName is resolved through DNS and the addresses handed to the underlying connection become ['x.x.x.x:port']. But when serviceName is a regular (ClusterIP) Service, the resolved IP is the Service IP, and the underlying Dial still reaches a single pod through that Service, so again there is no real service discovery or load balancing.
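In Kubernetes the dns resolver does become useful when combined with a headless Service, because DNS then returns the individual pod IPs. A minimal sketch, assuming a headless Service named my-headless-svc and the built-in round_robin policy (both names are illustrative):
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// With a headless Service, the dns resolver gets every pod IP, and
	// round_robin spreads RPCs across all of the resolved addresses.
	conn, err := grpc.Dial(
		"dns:///my-headless-svc.default.svc.cluster.local:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
}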
Implementing a custom gRPC resolver
Project layout
go mod init demo
├── client
│   └── main.go
├── go.mod
├── go.sum
├── pb
│   ├── helloworld.pb.go
│   ├── helloworld.proto
│   └── helloworld_grpc.pb.go
├── readme.txt
└── server
    └── main.go
Code
pb/helloworld.proto
syntax = "proto3";
package helloworld;
option go_package = ".;pb";

service Greeter {
  rpc SayHello (HelloRequest) returns (HelloReply) {}
}

message HelloRequest {
  string name = 1;
}

message HelloReply {
  string message = 1;
}
// protoc --go_out=./pb --go-grpc_out=./pb ./pb/*.proto
server/main.go
package main

import (
	"context"
	"demo/pb"
	"flag"
	"fmt"
	"google.golang.org/grpc"
	"log"
	"net"
)

var (
	port = flag.Int("port", 50051, "The server port")
)

type server struct {
	pb.UnimplementedGreeterServer
}

// The method signature must match SayHello as defined in the generated helloworld_grpc.pb.go.
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	// Log the Name field sent in the client's HelloRequest.
	log.Printf("Received: %v", in.GetName())
	// Echo the name back to the client in the reply.
	return &pb.HelloReply{Message: "Service1: Hello " + in.GetName()}, nil
}

// Entry point of the server.
func main() {
	flag.Parse()
	// Listen for TCP connections on the configured port (default 50051).
	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	// Create the gRPC server.
	s := grpc.NewServer()
	// Register the server object (the implementation of SayHello) with the gRPC server.
	pb.RegisterGreeterServer(s, &server{})
	// Start accepting and serving connections on the listener.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
client/main.go
package main

import (
	"context"
	"demo/pb"
	"fmt"
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
	"log"
	"time"
)

// Globally register the resolver builder for the "myservice" scheme.
func init() {
	log.Println("&myServiceBuilder!")
	resolver.Register(&myServiceBuilder{})
}

type myServiceBuilder struct {
}

func (*myServiceBuilder) Scheme() string {
	log.Println("myServiceBuilder Scheme()!")
	return "myservice"
}

// Build creates a resolver instance for the given target.
func (*myServiceBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	log.Println("myServiceBuilder Build()!")
	r := &myServiceResolver{
		target: target,
		cc:     cc,
	}
	r.start()
	return r, nil
}

type myServiceResolver struct {
	target resolver.Target
	cc     resolver.ClientConn
}

// Resolve the target into addresses (different targets could map to different ports).
func (r *myServiceResolver) start() {
	log.Println("myServiceResolver start()!")
	// Simulate "myservice" resolving into two addresses.
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ":50051"}, {Addr: ":50052"}}})
}

// ResolveNow re-runs the same resolution logic.
func (r *myServiceResolver) ResolveNow(o resolver.ResolveNowOptions) {
	log.Println("myServiceResolver ResolveNow()!")
	r.start()
}

func (*myServiceResolver) Close() {
	log.Println("myServiceResolver Close()!")
}

const (
	address1 = "myservice:///abc"
)

// Custom load balancer.
// Register the balancer builder during init.
func init() {
	balancer.Register(newMyPickBuilder())
}

func newMyPickBuilder() balancer.Builder {
	log.Println("newMyPickBuilder()!")
	return &myPickBuilder{}
}

type myPickBuilder struct{}

func (*myPickBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
	log.Println("myPickBuilder Build()!")
	return &myPickBalancer{
		state:     0,
		cc:        cc,
		subConns:  make(map[resolver.Address]balancer.SubConn),
		subConns1: make(map[balancer.SubConn]resolver.Address),
	}
}

func (*myPickBuilder) Name() string {
	log.Println("myPickBuilder Name()!")
	return "mypickBalance"
}

type myPickBalancer struct {
	state     connectivity.State
	cc        balancer.ClientConn
	subConns  map[resolver.Address]balancer.SubConn
	subConns1 map[balancer.SubConn]resolver.Address
}

func (b *myPickBalancer) ResolverError(err error) {
	log.Println("myPickBalancer ResolverError()!")
	// TODO: drop connections that are no longer valid.
}

func (b *myPickBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	log.Println("myPickBalancer UpdateClientConnState()!")
	addrsSet := make(map[resolver.Address]struct{})
	for _, a := range s.ResolverState.Addresses {
		addrsSet[a] = struct{}{}
		if _, ok := b.subConns[a]; !ok {
			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
			if err != nil {
				continue
			}
			b.subConns[a] = sc
			b.subConns1[sc] = a // keep the reverse SubConn -> Address mapping in sync
			sc.Connect()
		}
	}
	return nil
}

func (b *myPickBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
	log.Println("myPickBalancer UpdateSubConnState()!")
	// TODO: drop dead SubConns and add newly ready ones here.
	//if s.ConnectivityState == connectivity.Ready {
	//	b.subConns[b.subConns1[sc]] = sc
	//}
	log.Println("b.subConns", b.subConns)
	var scs []balancer.SubConn
	for _, sc := range b.subConns {
		scs = append(scs, sc)
	}
	if len(b.subConns) == 2 {
		b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &myPicker{scs}})
	}
}

func (b *myPickBalancer) Close() {
	log.Println("myPickBalancer Close()!")
}

type myPicker struct {
	subConns []balancer.SubConn
}

func (p *myPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	log.Println("myPicker Pick()!")
	// Use the parity of the current second to alternate between the two backends.
	second := time.Now().Second()
	fmt.Printf("Current Time Second:%d\n", second)
	if second%2 == 0 {
		return balancer.PickResult{SubConn: p.subConns[0]}, nil
	}
	return balancer.PickResult{SubConn: p.subConns[1]}, nil
}

func main() {
	// Dial the target "myservice:///abc"; the "myservice" scheme is handled by our custom resolver.
	conn, err := grpc.Dial(address1, grpc.WithInsecure(), grpc.WithBlock(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"mypickBalance":{}}]}`))
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	time.Sleep(100 * time.Millisecond)
	defer conn.Close()
	c := pb.NewGreeterClient(conn)
	// Give the call a 1-second timeout.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Call SayHello with Name "world"; r holds the server's reply.
	r, err := c.SayHello(ctx, &pb.HelloRequest{Name: "world"})
	if err != nil {
		log.Fatalf("could not greet: %v", err)
	}
	// The server echoes the name back with its greeting prefix.
	log.Printf("Greeting: %s", r.GetMessage())
	time.Sleep(time.Second)
	ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second)
	defer cancel2()
	// Call SayHello again with Name "world"; depending on the current second,
	// the picker may route it to the other backend.
	r2, err2 := c.SayHello(ctx2, &pb.HelloRequest{Name: "world"})
	if err2 != nil {
		log.Fatalf("could not greet: %v", err2)
	}
	// The reply again echoes the name with the server's greeting prefix.
	log.Printf("Greeting: %s", r2.GetMessage())
}
Testing
go run server/main.go --port 50051
go run server/main.go --port 50052
go run client/main.go