Create a highly available PostgreSQL cluster
Once the database is deployed, create the database used by the registry and a harbor user:
# Create a new user
postgres=# create user harbor with password 'harbor123456';
CREATE ROLE
# Create a database owned by the harbor user
postgres=# create database registry owner harbor;
CREATE DATABASE
# Grant all privileges on the registry database to harbor
postgres=# grant all privileges on database registry to harbor;
GRANT
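To confirm the new role and database are reachable from inside the cluster, a quick connectivity check can be run against the stolon proxy. A minimal sketch (the throwaway pod name and postgres image tag are illustrative, not from the original setup):

# run a one-off psql client pod and print the connection info
kubectl run psql-client --rm -it --restart=Never --image=postgres:15 -- \
  psql "host=stolon-proxy.stolon.svc.cluster.local port=5432 user=harbor dbname=registry password=harbor123456" -c '\conninfo'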
Create a highly available Redis cluster
For now, Harbor's built-in Redis is used; an external cluster can be swapped in later.
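With the internal Redis there is nothing extra to deploy: the redis block in values.yaml can stay at its default. A sketch of the relevant default (switching to an external cluster later only means changing the type and filling in redis.external):

redis:
  # "internal" uses the Redis deployed by the chart itself;
  # set to "external" and fill in redis.external once a HA Redis cluster is ready
  type: internal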
Download the Harbor Helm chart
# Add the Harbor Helm repository
[root@node1 ~]# helm repo add harbor https://helm.goharbor.io
# Search for the Harbor chart
[root@node1 ~]# helm search repo harbor
NAME             CHART VERSION   APP VERSION   DESCRIPTION
bitnami/harbor   19.0.0          2.9.0         Harbor is an open source trusted cloud-native r...
harbor/harbor    1.13.0          2.9.0         An open source trusted cloud native registry th...
# Download the chart
[root@node1 ~]# helm fetch harbor/harbor
# Extract it
[root@node1 harbordir]# tar -xf harbor-1.13.0.tgz
[root@node1 harbordir]# ls
harbor harbor-1.13.0.tgz
Modify the chart's values.yaml file
The modified sections are shown below (inline comments mark the values that were changed):
expose:
  # Set how to expose the service. Set the type as "ingress", "clusterIP", "nodePort" or "loadBalancer"
  # and fill the information in the corresponding section
  type: clusterIP # expose the service via ClusterIP
  tls:
    # Enable TLS or not.
    # Delete the "ssl-redirect" annotations in "expose.ingress.annotations" when TLS is disabled and "expose.type" is "ingress"
    # Note: if the "expose.type" is "ingress" and TLS is disabled,
    # the port must be included in the command when pulling/pushing images.
    # Refer to https://github.com/goharbor/harbor/issues/5291 for details.
    enabled: false # disable TLS termination in Harbor itself

portal:
  image:
    repository: goharbor/harbor-portal
    tag: v2.9.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 3 # number of portal Pod replicas
  revisionHistoryLimit: 10

core:
  image:
    repository: goharbor/harbor-core
    tag: v2.9.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  replicas: 3 # number of core Pod replicas
  revisionHistoryLimit: 10

jobservice:
  image:
    repository: goharbor/harbor-jobservice
    tag: v2.9.0
  replicas: 3 # number of jobservice Pod replicas
  revisionHistoryLimit: 10
  # set the service account to be used, default if left empty
  serviceAccountName: ""

registry:
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.9.0
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.9.0
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    extraEnvVars: []
  replicas: 3 # number of registry Pod replicas

trivy:
  # enabled the flag to enable Trivy scanner
  enabled: true
  image:
    # repository the repository for Trivy adapter image
    repository: goharbor/trivy-adapter-photon
    # tag the tag for Trivy adapter image
    tag: v2.9.0
  # set the service account to be used, default if left empty
  serviceAccountName: ""
  # mount the service account token
  automountServiceAccountToken: false
  # replicas the number of Pod replicas
  replicas: 3 # number of trivy Pod replicas
  # debugMode the flag to enable Trivy debug mode with more verbose scanning log
  debugMode: false

database:
  # if external database is used, set "type" to "external"
  # and fill the connection information in "external" section
  type: external # use the external PostgreSQL cluster
  internal:
    # set the service account to be used, default if left empty
    serviceAccountName: ""
    # mount the service account token
    automountServiceAccountToken: false
    image:
      repository: goharbor/harbor-db
      tag: v2.9.0
    # The initial superuser password for internal database
    password: "changeit"
    # The size limit for Shared memory, pgSQL use it for shared_buffer
    # More details see:
    # https://github.com/goharbor/harbor/issues/15034
    shmSizeLimit: 512Mi
    # resources:
    #  requests:
    #    memory: 256Mi
    #    cpu: 100m
    # The timeout used in livenessProbe; 1 to 5 seconds
    livenessProbe:
      timeoutSeconds: 1
    # The timeout used in readinessProbe; 1 to 5 seconds
    readinessProbe:
      timeoutSeconds: 1
    extraEnvVars: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    ## The priority class to run the pod as
    priorityClassName:
    initContainer:
      migrator: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
      permissions: {}
      # resources:
      #  requests:
      #    memory: 128Mi
      #    cpu: 100m
  external:
    host: "stolon-proxy.stolon.svc.cluster.local" # address and credentials of the external database
    port: "5432"
    username: "harbor"
    password: "harbor123456"
    coreDatabase: "registry"
    # if using existing secret, the key must be "password"
    existingSecret: ""
    # "disable" - No SSL
    # "require" - Always SSL (skip verification)
    # "verify-ca" - Always SSL (verify that the certificate presented by the
    # server was signed by a trusted CA)
    # "verify-full" - Always SSL (verify that the certification presented by the
    # server was signed by a trusted CA and the server host name matches the one
    # in the certificate)
    sslmode: "disable"
  # The maximum number of connections in the idle connection pool per pod (core+exporter).
  # If it <=0, no idle connections are retained.
  maxIdleConns: 100
  # The maximum number of open connections to the database per pod (core+exporter).
  # If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  maxOpenConns: 900
  ## Additional deployment annotations
  podAnnotations: {}
  ## Additional deployment labels
  podLabels: {}
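One more value worth checking in the same file: since the registry will later be published through Istio at www.myharbor.cn, the chart's top-level externalURL should point at that hostname as well, otherwise Harbor builds its links and redirects against the default core.harbor.domain. A minimal sketch of that setting:

externalURL: https://www.myharbor.cn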
Create the namespace
[root@node1 harbor]# kubectl create ns harbor
namespace/harbor created
Label the namespace so that Istio automatically injects the Envoy sidecar proxy when workloads are deployed into it:
[root@node1 harbor]# kubectl label namespace harbor istio-injection=enabled
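The label can be verified before installing anything, so that the sidecars are actually injected. For example:

kubectl get namespace harbor --show-labels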
Create the Harbor cluster
[root@node1 harbor]# helm install harbor . -f values.yaml -n harbor
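The install command returns as soon as the release is recorded, so it can help to check the release status and watch the pods come up before moving on. For example:

helm status harbor -n harbor
kubectl get pods -n harbor -w   # watch until the pods reach Running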
Check the Services
[root@node1 harbor]# kubectl get service -n harbor
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
harbor              ClusterIP   10.108.97.14     <none>        80/TCP              30m
harbor-core         ClusterIP   10.101.221.35    <none>        80/TCP              30m
harbor-jobservice   ClusterIP   10.97.185.160    <none>        80/TCP              30m
harbor-portal       ClusterIP   10.109.247.37    <none>        80/TCP              30m
harbor-registry     ClusterIP   10.105.202.200   <none>        5000/TCP,8080/TCP   30m
harbor-trivy        ClusterIP   10.106.112.33    <none>        8080/TCP            30m
Check the Pods
[root@node1 redis]# kubectl get pods -n harbor
NAME                                   READY   STATUS             RESTARTS        AGE
harbor-core-5c9b857794-hlw2x           2/2     Running            0               53s
harbor-core-5c9b857794-l6b9s           2/2     Running            0               114s
harbor-core-5c9b857794-wzxdr           2/2     Running            0               73s
harbor-jobservice-646c8567f5-8zw74     0/2     Init:0/1           0               114s
harbor-jobservice-6dcb8d948f-4n9nt     0/2     Init:0/1           0               49m
harbor-jobservice-6dcb8d948f-bpjwx     0/2     Init:0/1           0               49m
harbor-jobservice-6dcb8d948f-pvnf6     1/2     CrashLoopBackOff   6 (4m29s ago)   19m
harbor-nginx-5b76d7588d-llmbm          2/2     Running            0               49m
harbor-portal-68bcb5dd4c-grr49         2/2     Running            0               49m
harbor-portal-68bcb5dd4c-p7d4n         2/2     Running            0               49m
harbor-portal-68bcb5dd4c-tgsxn         2/2     Running            0               49m
harbor-redis-0                         2/2     Running            0               114s
harbor-registry-694447458c-gkfzs       0/3     Init:0/1           0               49m
harbor-registry-694447458c-wlpx2       3/3     Running            2 (21m ago)     49m
harbor-registry-694447458c-x9qd8       0/3     Init:0/1           0               49m
harbor-registry-7b4877f74-6hr5n        0/3     Init:0/1           0               114s
harbor-trivy-0                         2/2     Running            0               67s
harbor-trivy-1                         2/2     Running            0               85s
harbor-trivy-2                         2/2     Running            0               107s
Expose the Harbor service with Istio
Modify the externalIP of the istio-ingressgateway Service
kubectl edit svc istio-ingressgateway -n istio-system
Add the following content to the Service spec.
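A minimal sketch of the change, assuming the spare address 192.168.202.111 that www.myharbor.cn is resolved to later in this article:

spec:
  externalIPs:
  - 192.168.202.111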
After the edit, the Service lists the address in its EXTERNAL-IP column. Note that the externalIP configured here must not be the IP of a node; add an extra, otherwise unused IP to the server instead.
Generate SSL certificates
Create ca-csr.json
Create the certificate signing request file for the CA, ca-csr.json:
tee ca-csr.json << EOF
{
  "CN": "www.myharbor.cn",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "harbor",
      "OU": "System"
    }
  ]
}
EOF
Generate the CA certificate
[root@node1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2023/10/28 20:18:37 [INFO] generating a new CA key and certificate from CSR
2023/10/28 20:18:37 [INFO] generate received request
2023/10/28 20:18:37 [INFO] received CSR
2023/10/28 20:18:37 [INFO] generating key: rsa-2048
2023/10/28 20:18:37 [INFO] encoded CSR
2023/10/28 20:18:37 [INFO] signed certificate with serial number 355670949834431516094046025372819445862031595508
[root@node1 ssl]# ls
ca.csr ca-csr.json ca-key.pem ca.pem
Sign and generate the remaining certificates and keys with the CA root certificate and its private key
Create the CA signing configuration file:
tee ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "175200h"
    },
    "profiles": {
      "harbor": {
        "expiry": "175200h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
Create the signing request file for the target certificate:
tee myharbor-csr.json << EOF
{
  "CN": "myharbor.cn",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.202.111",
    "192.168.202.129",
    "192.168.202.130",
    "192.168.202.131",
    "192.168.202.132",
    "www.myharbor.cn"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "etcd-op",
      "OU": "System"
    }
  ]
}
EOF
Sign and generate the target certificate and private key with the CA root certificate and its key:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=harbor myharbor-csr.json | cfssljson -bare mybarbor
List all files in the current directory:
[root@node1 ssl]# ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem mybarbor.csr mybarbor-key.pem mybarbor.pem myharbor-csr.json
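Before loading the certificate into a secret, you can optionally confirm that all the expected hosts made it into the SAN list, for example:

openssl x509 -in mybarbor.pem -noout -text | grep -A1 'Subject Alternative Name'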
Create a secret holding the certificate and private key
[root@node1 ssl]# kubectl create -n istio-system secret tls harbor-credential --key mybarbor-key.pem --cert mybarbor.pem
secret/harbor-credential created
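The secret must live in istio-system (the ingress gateway's namespace) for the Gateway below to find it by credentialName. A quick check that it was created with the kubernetes.io/tls type, for example:

kubectl get secret harbor-credential -n istio-system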
Create the Gateway
tee harbor-gateway.yaml << EOF
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: harbor-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 80 # accept plain HTTP traffic
      name: http
      protocol: HTTP
    hosts:
    - "www.myharbor.cn"
  - port:
      number: 443 # accept encrypted HTTPS traffic
      name: https
      protocol: HTTPS
    tls:
      mode: SIMPLE # terminate TLS with a server-side certificate
      credentialName: harbor-credential
    hosts:
    - "www.myharbor.cn"
EOF
Apply the Gateway YAML file
[root@node1 ssl]# kubectl apply -f harbor-gateway.yaml
gateway.networking.istio.io/harbor-gateway created
Check the Gateway
[root@node1 ssl]# kubectl get gw -n istio-system
NAME             AGE
harbor-gateway   4m41s
Create the VirtualService
tee harbor-virtualservice.yaml << EOF
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: harbor-virtualservice
  namespace: harbor
spec:
  hosts:
  - "www.myharbor.cn" # must match the host declared on the Gateway above
  gateways:
  - istio-system/harbor-gateway # the Gateway this VirtualService is bound to
  http:
  - match:
    - uri:
        prefix: /
    route:
    - destination:
        host: harbor # the harbor Service created by the chart
        port:
          number: 80 # target port on that Service
EOF
Apply the YAML file
[root@node1 ssl]# kubectl apply -f harbor-virtualservice.yaml
virtualservice.networking.istio.io/harbor-virtualservice created
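As with the Gateway, the new object can be listed to confirm it exists, for example:

kubectl get virtualservice -n harbor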
Check the access logs of the ingress gateway
[root@node1 harbor]# kubectl logs -f -l istio=ingressgateway -c istio-proxy -n istio-system
Test the site
Configure /etc/hosts
In /etc/hosts on the client host, add an entry that resolves www.myharbor.cn to 192.168.202.111.
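For example (the address is the external IP configured on the ingress gateway above):

# append the entry on the client host
echo '192.168.202.111 www.myharbor.cn' >> /etc/hosts

# the gateway terminates TLS with the self-signed CA generated earlier,
# so either trust ca.pem on the client or skip verification for a quick smoke test
curl -k -I https://www.myharbor.cn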