How to Deploy a Hyperledger Fabric Network on Kubernetes?


Deploying Hyperledger Fabric on Kubernetes looks like it should be straightforward, but once you actually get your hands dirty you find that:

  • Hand-rolling the deployment and lifecycle management of Fabric MSPs / CAs / Orderers / Peers is extremely complex
  • Multi-organization, multi-channel setups are nearly impossible to maintain long-term with scripts alone

Hence the subject of this article, Hyperledger Bevel: an automated deployment and governance framework for enterprise-grade Fabric networks.

What does Hyperledger Bevel solve?

  1. Orchestrates the Fabric network
  2. Manages the lifecycle of certificates and MSPs
  3. Automates multi-organization, multi-channel setups
  4. Integrates seamlessly with the Kubernetes ecosystem


In practice, each organization runs its own CA service, which issues certificates for that organization's peers, orderers, and the users that interact with the network. It is also referred to as an intermediate CA, and it directly issues the Peer MSP, Orderer MSP, and so on.
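Bevel drives all of this through the HLF Operator's custom resources rather than by hand, but for intuition, a rough manual equivalent of standing up one organization's CA with the kubectl-hlf plugin is sketched below. The flags follow the bevel-operator-fabric documentation; the name, namespace and storage class simply mirror this article's "carrier" org and are illustrative assumptions, not Bevel's exact invocation:

# Create a Fabric CA for the carrier organization (illustrative only)
kubectl hlf ca create \
  --name=ca-carrier \
  --namespace=carrier-net \
  --storage-class=local-path \
  --capacity=1Gi \
  --enroll-id=enroll \
  --enroll-pw=enrollpw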

Preparing the environment

  1. The Hyperledger Bevel source code
  2. krew-linux_amd64.tar.gz and hlf-operator_v1.9.0_linux_amd64.zip
  3. An Istio Helm repository
  4. A Kubernetes cluster
  5. A Harbor registry

Starting the deployment

Target Kubernetes cluster

/var/lib/krew/plugins/hlf.yaml

apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: hlf
spec:
  version: "v1.9.0"
  homepage: https://github.com/hyperledger-bevel/bevel-operator-fabric
  shortDescription: "Deploy and manage Hyperledger Fabric components"
  description: |
    The kubectl-hlf plugin wraps the HLF Operator and provides a simple
    interface to create and manage Hyperledger Fabric Peers,
    Orderers and Certificate authorities
  caveats: |
    * For resources that are not in default namespace, currently you must
      specify -n/--namespace explicitly (the current namespace setting is not
      yet used).
  platforms:
    - selector:
        matchLabels:
          os: linux
          arch: amd64
      uri: http://192.168.31.253:8081/hlf/plugins/hlf-operator_v1.9.0_linux_amd64.zip
      sha256: d9fc43b51384df295462abe1d4c5d1349fc3e2ad47413f0f4bc279080e158b6b
      bin: kubectl-hlf
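
One detail worth calling out: the sha256 above must match the archive you actually host at the uri. If you rebuild or re-download the zip, recompute the digest and update the manifest:

sha256sum hlf-operator_v1.9.0_linux_amd64.zip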

/home/hlf/hlf_install.sh


#!/usr/bin/env bash
set -euo pipefail

cd "$(mktemp -d)"

# 1) Download the krew archive from the internal mirror and unpack it
curl -fsSLO "http://192.168.31.253:8081/istio/krew-linux_amd64.tar.gz"
tar -zxvf krew-linux_amd64.tar.gz

# 2) Manually "install" krew: place the executable under /var/lib/krew-root/bin; the file name must be kubectl-krew
mkdir -p "/var/lib/krew-root/bin"
cp ./krew-linux_amd64 "/var/lib/krew-root/bin/kubectl-krew"
chmod +x "/var/lib/krew-root/bin/kubectl-krew"

# 3) Add to PATH (effective for the current session, and appended to .bashrc)
export PATH="/var/lib/krew-root/bin:${PATH}"
grep -q 'krew-root/bin' ~/.bashrc || echo 'export PATH="/var/lib/krew-root/bin:$PATH"' >> ~/.bashrc

mkdir -p /var/lib/krew/plugins/
curl -fsSL http://192.168.31.253:8081/hlf/plugins/hlf-operator_v1.9.0_linux_amd64.zip -o /var/lib/krew/plugins/hlf-operator_v1.9.0_linux_amd64.zip
kubectl krew install   --manifest=/var/lib/krew/plugins/hlf.yaml   --archive=/var/lib/krew/plugins/hlf-operator_v1.9.0_linux_amd64.zip

helm repo add kfs http://192.168.31.253:8081/hlf --force-update
helm install hlf-operator --version=1.9.0 kfs/hlf-operator


# ====== Required settings (adjust as needed) ======
helm repo add istio http://192.168.31.253:8081/istio-1.23.3
helm repo update
kubectl create ns istio-system --dry-run=client -o yaml | kubectl apply -f -
helm upgrade --install istio-base istio/base -n istio-system

helm upgrade --install istiod istio/istiod \
 -n istio-system --version 1.23.3 \
 --set global.hub=192.168.31.253:80/istio-release \
 --set global.tag=1.23.3 \
 --set global.imagePullSecrets[0]=harbor-creds

helm upgrade --install istio-ingressgateway istio/gateway -n istio-system --version 1.23.3 \
 --set global.hub=192.168.31.253:80/istio-release \
 --set global.tag=1.23.3 \
 --set global.imagePullSecrets[0]=harbor-creds \
 --set service.type=NodePort \
 --set service.ports[0].name=http2 --set service.ports[0].port=80 --set service.ports[0].targetPort=8080 --set service.ports[0].nodePort=30080 \
 --set service.ports[1].name=https --set service.ports[1].port=443 --set service.ports[1].targetPort=8443 --set service.ports[1].nodePort=30443

/home/istio-system/fabric-gw.yaml

apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: fabric-gw
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 443
      name: tls
      protocol: TLS
    tls:
      mode: PASSTHROUGH
    hosts:
      - "*"

/home/local-path/local-path-sc.yaml

apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
  - apiGroups: [""]
    resources: ["nodes", "persistentvolumeclaims", "configmaps", "pods", "pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "patch", "update", "delete"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
  - kind: ServiceAccount
    name: local-path-provisioner-service-account
    namespace: local-path-storage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
        - name: local-path-provisioner
          image: 192.168.31.253:80/rancher/local-path-provisioner:v0.0.32
          imagePullPolicy: IfNotPresent
          command:
            - local-path-provisioner
            - --debug
            - start
            - --config
            - /etc/config/config.json
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config/
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CONFIG_MOUNT_PATH
              value: /etc/config/
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
            "nodePathMap":[
            {
                    "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
                    "paths":["/opt/local-path-provisioner"]
            }
            ]
    }
  setup: |-
    #!/bin/sh
    set -eu
    mkdir -m 0777 -p "$VOL_DIR"
  teardown: |-
    #!/bin/sh
    set -eu
    rm -rf "$VOL_DIR"
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - key: node.kubernetes.io/disk-pressure
          operator: Exists
          effect: NoSchedule
      containers:
      - name: helper-pod
        image: 192.168.31.253:80/tools/busybox:stable
        imagePullPolicy: IfNotPresent
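
The CA / peer / orderer workloads created later request PVCs. If a chart or custom resource does not set storageClassName explicitly, you may want to mark local-path as the cluster default (skip this if another default StorageClass already exists):

kubectl patch storageclass local-path -p \
  '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'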

bash /home/hlf/hlf_install.sh
echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >> ~/.bashrc
source ~/.bashrc
kubectl apply -f /home/istio-system/fabric-gw.yaml
kubectl apply -f /home/local-path/local-path-sc.yaml
  1. To be clear: the hlf plugin and the HLF Operator (hlf-operator) should be installed on the target cluster (recommended).
  2. Istio is not a mandatory component of Hyperledger Fabric; it acts as a unified network entry point and traffic-governance layer for centrally managing access to Peers and Orderers. In this architecture, however, Istio must be installed so that the Fabric network has a single entry point for cross-cluster / cross-network access.
  3. It is also recommended to install HAProxy/Nginx in front and forward external traffic to ports 30080 (HTTP) and 30443 (HTTPS); a connectivity check is sketched below.
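
Since the Istio gateway terminates nothing and passes TLS straight through based on SNI, you can sanity-check the external path once a peer is up. A minimal sketch, assuming the carrier org's peer0 from this article has been deployed and 192.168.31.135 is your external IP:

# TLS-passthrough check against the ingress-gateway NodePort (30443)
openssl s_client -connect 192.168.31.135:30443 \
  -servername peer0.carrier-net.192.168.31.135.nip.io </dev/null 2>/dev/null |
  openssl x509 -noout -subject -issuer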

Deployment machine

Prerequisites (/var/lib/krew/plugins/hlf.yaml is identical to the one on the target cluster, and the hlf-operator_v1.9.0_linux_amd64.zip file also needs to be prepared)

## Install helm
mkdir -p /tmp/helm && cd /tmp/helm
wget -q http://192.168.31.253:8081/debs/helm/helm-v3.14.4-linux-amd64.tar.gz
tar -zxvf helm-v3.14.4-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
sudo chmod +x /usr/local/bin/helm
## Install jq
apt-get install -y jq
## Install kubectl (or install it from the binary release)
apt-get install -y kubectl=1.29.15
## Install ansible
pip install ansible==9.1.0
# Install the kubernetes Python SDK
pip install "kubernetes>=26.1.0,<27"
ansible-galaxy collection install kubernetes.core
## Install the hlf kubectl plugin
export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
kubectl krew install --manifest=/var/lib/krew/plugins/hlf.yaml --archive=/var/lib/krew/plugins/hlf-operator_v1.9.0_linux_amd64.zip
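
Before moving on, it is worth confirming the plugin actually resolves on the PATH (both checks are standard krew/kubectl behavior):

kubectl krew list     # should include "hlf"
kubectl hlf --help    # confirms kubectl can find the kubectl-hlf binary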

Details

  1. Unpack / clone the Hyperledger Bevel source code (/home/bevel2)
  2. Edit the files below: change every occurrence of "192.168.31.135" to your own external IP address (a bulk-substitution sketch follows the YAML), and k8s.config_file must be replaced with the target cluster's kubeconfig.
# inventories/fabric-operator/network-base.yaml
network:
  type: fabric                  # required; the fabric network type
  version: "2.5.4"              # required; this schema version supports 2.5.3 / 2.5.4, etc.
  images:
    ca: "1.5.11"

  env:                          # required; runtime environment for the operator + istio
    type: operator
    proxy: istio
    external_dns: enabled
    retry_count: 20

  docker:                       # required; private/local image registry; url is mandatory
    url: "192.168.31.253:80"
    username: "admin"
    password: "Harbor12345"

  channels:
    - channel:
      consortium: SupplyChainConsortium
      channel_name: AllChannel
      channel_status: new
      osn_creator_org:
        name: ordererorg
      chaincodes: []
      orderers:
        - ordererorg
      participants:
        - organization:
          name: carrier
          type: creator
          org_status: new
          peers:
          - peer:
            name: peer0
            type: anchor
            gossipAddress: peer0.carrier-net.192.168.31.135.nip.io:443
            peerAddress: peer0.carrier-net.192.168.31.135.nip.io:443
          ordererAddress:  orderer1.ordererorg.192.168.31.135.nip.io:443

  orderers:
    - name: orderer1
      type: orderer
      org_name: ordererorg
      uri: "orderer1.ordererorg.192.168.31.135.nip.io:443"
      certificate: /path/ordererorg/tlsca-orderer1-server.crt
      #certificate: /path/ordererorg/server.crt

    - name: orderer2
      type: orderer
      org_name: ordererorg
      uri: "orderer2.ordererorg.192.168.31.135.nip.io:443"
      certificate: /path/ordererorg/tlsca-orderer2-server.crt
#     certificate: /path/ordererorg/server.crt

    - name: orderer3
      type: orderer
      org_name: ordererorg
      uri: "orderer3.ordererorg.192.168.31.135.nip.io:443"
      certificate: /path/ordererorg/tlsca-orderer3-server.crt
#      certificate: /path/ordererorg/server.crt
  organizations:
    - name: carrier
      type: peer
      external_url_suffix: "192.168.31.135.nip.io"
      country: "DE"
      state: "BW"
      location: "Stuttgart"
      subject: "/C=DE/ST=BW/L=Stuttgart/O=ca-carrier/OU=Fabric/CN=ca-carrier"
      cloud_provider: minikube
      k8s:
        context: "kubernetes-admin@kubernetes"
        config_file: "/home/ansible-playbook/.kube/config"
      services:
        ca:
          type: ca
          name: ca-carrier
          subject: "/C=DE/ST=BW/L=Stuttgart/O=carrier/OU=Fabric/CN=ca-carrier"
          grpc: { port: 7054 }   # the port object must contain a port field
        peers:
        - peer:
          name: peer0
          type: anchor
          gossippeeraddress: peer0.carrier-net.192.168.31.135.nip.io:443
          peerAddress: peer0.carrier-net.192.168.31.135.nip.io:443
          cli: disabled
          cactus_connector: disabled
          grpc:
            port: 7051
          events:
            port: 7053
          couchdb:
            port: 5984
          restserver:           # This is for the rest-api server
            targetPort: 20001
            port: 20001
          expressapi:           # This is for the express api server
            targetPort: 3000
            port: 3000
      ca_data:
        certificate: /path/carrier/server.crt
        url: carrier-ca.192.168.31.135.nip.io
    - name: ordererorg
      type: orderer
      # ↓↓↓ these are all mandatory fields of fabric_organization
      country: "DE"
      state: "BW"
      location: "Stuttgart"
      subject: "/C=DE/ST=BW/L=Stuttgart/O=ordererorg/OU=Fabric/CN=ordererorg"
      external_url_suffix: "192.168.31.135.nip.io"

      # ↓↓↓ mandatory fields of shared_organization (must be declared even when not on AWS)
      cloud_provider: minikube
      k8s:
        context: "kubernetes-admin@kubernetes"
        config_file: "/home/ansible-playbook/.kube/config"   # note: the key name is config_file

      # ↓↓↓ mandatory part of fabric_services: at least a ca must be present
      services:
        ca:
          type: ca
          name: ca-orderer
          subject: "/C=DE/ST=BW/L=Stuttgart/O=ordererorg/OU=Fabric/CN=ca-orderer"
          grpc: { port: 7054 }   # the port object must contain a port field
        orderers:
          - orderer:
            name: orderer1
            type: orderer
            consensus: raft
            grpc:
              port: 7050
            ordererAddress: orderer1.ordererorg.192.168.31.135.nip.io:443
          - orderer:
            name: orderer2
            type: orderer
            grpc:
              port: 7050
            consensus: raft
            ordererAddress: orderer2.ordererorg.192.168.31.135.nip.io:443
          - orderer:
            name: orderer3
            grpc:
              port: 7050
            consensus: raft
            type: orderer
            ordererAddress: orderer3.ordererorg.192.168.31.135.nip.io:443

      ca_data:
        certificate: /path/ordererorg/server.crt
        url: ordererorg-ca.192.168.31.135.nip.io
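
If you keep the sample layout, a bulk substitution saves manual editing. A sketch, where MY_IP is a placeholder for your own external IP and the path matches the /home/bevel2 checkout used in this article:

MY_IP=203.0.113.10   # placeholder: set to your external IP
grep -rl '192.168.31.135' /home/bevel2/inventories/fabric-operator/ |
  xargs sed -i "s/192\.168\.31\.135/${MY_IP}/g"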
# inventory.ini
# this is the Python environment Ansible runs with
[ansible_provisioners]
localhost ansible_connection=local ansible_python_interpreter=/root/bevel-venv/bin/python

[ansible_provisioners:vars]
# your Bevel root directory
bevel_repo_dir=/home/bevel2

# Default kubeconfig (network.yaml specifies each organization's own kube.config_file and context;
# replace this with your own Kubernetes config file)
kubeconfig_default=/home/ansible-playbook/.kube/config

# tool checks (optional, to match your workflow)
verify_tools=true
use_krew=true

# Operator/Istio related (informational only; the actual control lives in network.yaml)
istio_namespace=istio-system
hlf_operator_namespace=default
[all:vars]
helm_components=[{"name":"istiod","namespace":"istio-system"},{"name":"istio-ingressgateway","namespace":"istio-system"}]

platforms/hyperledger-fabric/configuration/roles/operator/create/orderer/tasks/main.yaml
platforms/hyperledger-fabric/configuration/roles/operator/create/peer/tasks/main.yaml
platforms/hyperledger-fabric/configuration/roles/operator/create/ca/server/tasks/main.yaml

In the Bevel source, the image addresses in the files above need to be changed to point at your own registry (a grep sketch to locate them follows).
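A quick way to locate every image reference in those role tasks (key names may differ slightly between Bevel releases):

cd /home/bevel2
grep -rn 'image' platforms/hyperledger-fabric/configuration/roles/operator/create/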

Run the deployment

ansible-playbook /home/bevel2/platforms/shared/configuration/site.yaml -i /home/bevel2/inventories/fabric-operator/inventory.ini -e @/home/bevel2/inventories/fabric-operator/119/network-base.yaml
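
Once the playbook finishes, a rough health check looks like the following. The fabric* CRD names are assumed from bevel-operator-fabric; if yours differ, the first command lists them:

kubectl api-resources | grep -i fabric                     # discover the operator's CRDs
kubectl get fabriccas,fabricpeers,fabricorderernodes -A    # assumed plural CRD names
kubectl get pods -A | grep -E 'carrier|orderer|hlf'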

The images and offline installation packages used in this post are available on request.

With that, the Fabric network has been initialized and a channel named AllChannel has been created.

This is my first post here; feel free to point out anything that could be improved.