目录
方案概述
设备硬件配置及角色规划
节点基础优化
数据库部署
CM-Master部署
-
方案概述
本手册详细描述了 POCKETBI-QC 集群的部署流程及组件参数优化。
-
设备硬件配置及角色规划
| 主机名 | IP 地址 | 硬件配置 | 系统版本 | 承载角色 |
|---|---|---|---|---|
| qcshspi11119 | 172.20.230.105 | 48C-377GB-128GB+36.4TB(7.3TB*5) | CentOS 7.8 | CM-Server\JournalNode\DataNode\NodeManager\ZooKeeper-Server |
| qcshspi11120 | 172.20.230.106 | 64C-256GB-50GB+7.3TB*6 | CentOS 7.6 | CM-Agent\MySQL主\Balancer\NameNode\JournalNode\DataNode\ResourceManager\NodeManager\ZooKeeper-Server |
| qcshspi11121 | 172.20.230.107 | 64C-256GB-50GB+7.3TB*6 | CentOS 7.6 | CM-Agent\MySQL备\NameNode\JournalNode\DataNode\ResourceManager\NodeManager\ZooKeeper-Server |
-
节点基础优化:
-
1. ## 添加 CentOS YUM 源
-
[root@allnodes ~]# cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bkup
[root@allnodes ~]# vi /etc/yum.repos.d/CentOS-Base.repo
[CentOS7-Base]
name = CentOS7-Base
baseurl = https://mirrors.aliyun.com/centos/7/os/x86_64/
gpgcheck = 1
gpgkey = https://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
enabled = 1
[CentOS7-Epel]
name = CentOS7-Epel
baseurl = https://mirrors.aliyun.com/epel/7/x86_64/
gpgcheck = 0
enabled = 1
[Ansible]
name = Ansible
baseurl = https://mirrors.aliyun.com/centos/7/configmanagement/x86_64/ansible-29/
gpgcheck = 0
enabled = 1
2. ## 挂载磁盘:
[root@allnodes ~]# yum install parted -y
[root@allnodes ~]# parted
(parted) select /dev/sda #轮询所有数据盘
(parted) mklabel gpt #yes
(parted) mkpart primary 0 -1 #ignore
(parted) print
(parted) q
[root@allnodes ~]# mkfs.xfs -f /dev/sda1 #轮询所有数据盘(格式化分区而非整盘,与下行打标签的 /dev/sda1 保持一致)
[root@allnodes ~]# xfs_admin -L /mnt/disk1 /dev/sda1 #轮询所有数据盘
[root@allnodes ~]# for i in 1 2 3 4 5 6; do mkdir /mnt/disk$i; done
[root@allnodes ~]# ll /mnt |grep disk
[root@allnodes ~]# cp /etc/fstab /etc/fstab.bkup
[root@allnodes ~]# cat >> /etc/fstab << EOF
LABEL=/mnt/disk1 /mnt/disk1 xfs noatime,nodiratime 1 2
LABEL=/mnt/disk2 /mnt/disk2 xfs noatime,nodiratime 1 2
LABEL=/mnt/disk3 /mnt/disk3 xfs noatime,nodiratime 1 2
LABEL=/mnt/disk4 /mnt/disk4 xfs noatime,nodiratime 1 2
LABEL=/mnt/disk5 /mnt/disk5 xfs noatime,nodiratime 1 2
LABEL=/mnt/disk6 /mnt/disk6 xfs noatime,nodiratime 1 2
EOF
[root@allnodes ~]# mount -a
3. ## 关闭 Selinux
[root@allnodes ~]# cat /etc/selinux/config
SELINUX=disabled
[root@allnodes ~]# getenforce
Disabled
4. ## 关闭防火墙
[root@allnodes ~]# systemctl stop firewalld && systemctl disable firewalld && systemctl status firewalld
5. ## 配置 NTP 服务
[root@allnodes ~]# yum install chrony -y
[root@allnodes ~]# cp /etc/chrony.conf /etc/chrony.conf.bakup
[root@allnodes ~]# vi /etc/chrony.conf
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
server 172.20.230.105 iburst
[root@allnodes ~]# systemctl enable chronyd --now
[root@allnodes ~]# chronyc sources -v
MS Name/IP address Stratum Poll Reach LastRx Last sample
==============================================================================
^* 172.20.230.105 3 6 17 1 +188ns[-6323ns] +/- 21ms
[root@allnodes ~]# date
6. ## 更改主机名
[root@allnodes ~]# hostnamectl set-hostname qcshspi11119 #各节点分别设置为规划表中对应的主机名
[root@allnodes ~]# hostname
7. ## 配置免密登录
[root@allnodes ~]# cat /etc/ssh/sshd_config |grep PermitRootLogin
[root@allnodes ~]# cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bkup
[root@allnodes ~]# vi /etc/ssh/sshd_config #PermitRootLogin yes
[root@allnodes ~]# service sshd restart
[root@master ~]# ls -ltr .ssh/ #若已存在id_rsa文件,不能重新获取rsa
[root@master ~]# ssh-keygen -t rsa
[root@master ~]# cat ~/.ssh/id_rsa.pub
[root@allnodes ~]# cp ~/.ssh/authorized_keys ~/.ssh/authorized_keys.bkup && vi ~/.ssh/authorized_keys
[root@master ~]# ssh root@172.20.230.105 #轮询所有节点
[root@allnodes ~]# scp -r root@172.20.179.23:/data/ibm/app/newnode-profile/java /usr/
8. ## 配置 OracleJDK
[root@allnodes ~]# cp /etc/profile /etc/profile.bkup
[root@allnodes ~]# vi /etc/profile
#CDH-Java 1.8.0_181 2021-11-05
export JAVA_HOME=/usr/java/jdk1.8.0_181-cloudera
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@allnodes ~]# source /etc/profile
[root@allnodes ~]# java -version && echo $JAVA_HOME
java version "1.8.0_181"
Java(TM) SE Runtime Environment (build 1.8.0_181-b13)
Java HotSpot(TM) 64-Bit Server VM (build 25.181-b13, mixed mode)
/usr/java/jdk1.8.0_181-cloudera
9. ## 通过 Ansible 配置 HOSTS 文件
[root@master ~]# yum install ansible -y
[root@master ~]# mkdir -p /root/ansible/roles
[root@master ~]# cd /root/ansible
[root@master ~]# vi inventory
[cm]
172.20.230.105
[all]
172.20.230.105
172.20.230.106
172.20.230.107
[root@master ~]# vi ansible.cfg
[defaults]
inventory = /root/ansible/inventory
roles_path = /root/ansible/roles
[root@master ~]# ansible all -m ping
[root@master ~]# ansible all -m shell -a ' cp /etc/hosts /etc/hosts.bkup '
[root@master ~]# ansible all -m shell -a ' echo "#CDH-Nodes 2021-11-05" >> /etc/hosts && echo "172.20.230.105 qcshspi11119" >> /etc/hosts && echo "172.20.230.106 qcshspi11120" >> /etc/hosts && echo "172.20.230.107 qcshspi11121" >> /etc/hosts '
[root@master ~]# ansible all -m shell -a ' cat /etc/hosts '
10. ## 配置 vm.swappiness
[root@allnodes ~]# swapoff -a
[root@allnodes ~]# vi /etc/fstab #注释swap挂载
[root@allnodes ~]# free -m
total used free shared buff/cache available
Mem: 257417 1985 255145 9 285 254579
Swap: 0 0 0
[root@allnodes ~]# cp /etc/sysctl.conf /etc/sysctl.conf.bkup
[root@allnodes ~]#
echo "net.core.netdev_max_backlog = 2000" >> /etc/sysctl.conf
echo "net.core.somaxconn = 2048" >> /etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_time = 60" >> /etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_intvl = 20" >> /etc/sysctl.conf
echo "net.ipv4.tcp_keepalive_probes = 3" >> /etc/sysctl.conf
echo "net.ipv4.tcp_sack = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_fack = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_timestamps = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_window_scaling = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_tw_reuse = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_tw_recycle = 1" >> /etc/sysctl.conf #注意:该参数自内核 4.12 起已移除,且在 NAT 环境下可能导致连接失败,建议评估后置 0
echo "net.ipv4.tcp_fin_timeout = 30" >> /etc/sysctl.conf
echo "net.ipv4.tcp_mem = 379008 505344 758016" >> /etc/sysctl.conf
echo "net.ipv4.tcp_wmem = 4096 16384 4194304" >> /etc/sysctl.conf
echo "net.ipv4.tcp_rmem = 4096 87380 4194304" >> /etc/sysctl.conf
echo "net.core.rmem_default = 8388608" >> /etc/sysctl.conf
echo "net.core.rmem_max = 16777216" >> /etc/sysctl.conf
echo "net.core.wmem_default = 8388608" >> /etc/sysctl.conf
echo "net.core.wmem_max = 16777216" >> /etc/sysctl.conf
echo "vm.swappiness = 0" >> /etc/sysctl.conf
[root@allnodes ~]# sysctl -p
11. ## 配置 transparent_hugepage
[root@allnodes ~]# echo never > /sys/kernel/mm/transparent_hugepage/enabled && echo never > /sys/kernel/mm/transparent_hugepage/defrag
[root@allnodes ~]# cp /etc/rc.local /etc/rc.local.bkup
[root@allnodes ~]# echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.local && echo "echo never > /sys/kernel/mm/transparent_hugepage/defrag" >> /etc/rc.local
[root@allnodes ~]# chmod +x /etc/rc.d/rc.local #CentOS7 默认 rc.local 无执行权限,需手动添加,否则开机不生效
12. ## 配置 最大连接数
[root@allnodes ~]# ls -ltr /etc/security/limits.d/
total 4
-rw-r--r--. 1 root root 191 Apr 1 2020 20-nproc.conf
[root@allnodes ~]# cp /etc/security/limits.d/20-nproc.conf /etc/security/limits.d/20-nproc.conf.bkup
[root@allnodes ~]# vi /etc/security/limits.d/20-nproc.conf
* soft nproc 192039
root soft nproc unlimited
13. ## 安装 CDH 依赖包
[root@allnodes ~]# yum install python bind-utils psmisc libxslt zlib sqlite cyrus-sasl-plain cyrus-sasl-gssapi fuse fuse-libs redhat-lsb mod_ssl perl.x86_64 -y
5. # 数据库部署
1. ## 安装 MySQL
root@mysql ~]# rpm -qa |grep mariadb && rpm -qa |grep mysql
mariadb-libs-5.5.60-1.el7_5.x86_64
[root@mysql ~]# yum remove mariadb* -y
[root@mysql ~]# scp -r root@172.20.179.23:/data/ibm/mysql/mysql-5.7.27 /opt/
[root@mysql ~]# rpm -ivh /opt/mysql-5.7.27/mysql-community-common-5.7.27-1.el7.x86_64.rpm && rpm -ivh /opt/mysql-5.7.27/mysql-community-libs-5.7.27-1.el7.x86_64.rpm && rpm -ivh /opt/mysql-5.7.27/mysql-community-client-5.7.27-1.el7.x86_64.rpm && rpm -ivh /opt/mysql-5.7.27/mysql-community-server-5.7.27-1.el7.x86_64.rpm && rpm -ivh /opt/mysql-5.7.27/mysql-community-libs-compat-5.7.27-1.el7.x86_64.rpm
[root@mysql ~]# systemctl restart mysqld && systemctl enable mysqld && systemctl status mysqld
[root@mysql ~]# cat /var/log/mysqld.log |grep password
2021-11-05T03:47:42.499030Z 1 [Note] A temporary password is generated for root@localhost: sL4c;Wxqn35>
[root@mysql ~]# mysql -uroot -p #sL4c;Wxqn35>
mysql> set global validate_password_policy=0;
mysql> set global validate_password_number_count=0;
mysql> set global validate_password_length=3;
mysql> set password = password('root');
mysql> grant all privileges on *.* to 'root'@'localhost' identified by 'root' with grant option;
mysql> grant all privileges on *.* to 'root'@'%' identified by 'root' with grant option;
mysql> flush privileges;
mysql> select user,host from mysql.user;
+---------------+-----------+
| user | host |
+---------------+-----------+
| root | % |
| mysql.session | localhost |
| mysql.sys | localhost |
| root | localhost |
+---------------+-----------+
mysql> exit
[root@mysql ~]# mysql -uroot -proot
mysql> create database test;
mysql> show databases;
mysql> drop database test;
mysql> show databases;
2. ## MySQL主从配置
[root@mysql-Master ~]# vi /etc/my.cnf
[mysqld]
server-id=1
log-bin=mysql-bin
[root@mysql-Master ~]# mysql -uroot -proot
mysql> grant replication slave on *.* to 'root'@'172.20.230.107' identified by 'root'; #slave IP 地址
mysql> flush privileges;
mysql> exit
[root@mysql-Master ~]# systemctl restart mysqld
[root@mysql-Master ~]# mysql -uroot -proot
mysql> show master status;
+------------------+----------+--------------+------------------+-------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+------------------+----------+--------------+------------------+-------------------+
| mysql-bin.000001 | 154 | | | |
+------------------+----------+--------------+------------------+-------------------+
[root@mysql-Slave ~]# vi /etc/my.cnf
[mysqld]
server-id=2
log-bin=mysql-bin
[root@mysql-Slave ~]# service mysqld restart
[root@mysql-Slave ~]# mysql -uroot -proot
mysql> change master to
-> master_host='172.20.230.106',
-> master_user='root',
-> master_password='root',
-> master_log_file='mysql-bin.000001',
-> master_log_pos=154;
mysql> start slave;
mysql> show slave status\G
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: 172.20.230.106
Master_User: root
Master_Port: 3306
Connect_Retry: 60
Master_Log_File: mysql-bin.000001
Read_Master_Log_Pos: 154
Relay_Log_File: qcshspi11121-relay-bin.000002
Relay_Log_Pos: 320
Relay_Master_Log_File: mysql-bin.000001
Slave_IO_Running: Yes #表示slave的日志读取线程开启
Slave_SQL_Running: Yes #表示SQL执行线程开启
Replicate_Do_DB:
Replicate_Ignore_DB:
Replicate_Do_Table:
Replicate_Ignore_Table:
Replicate_Wild_Do_Table:
Replicate_Wild_Ignore_Table:
Last_Errno: 0
Last_Error:
Skip_Counter: 0
Exec_Master_Log_Pos: 154
Relay_Log_Space: 534
Until_Condition: None
Until_Log_File:
Until_Log_Pos: 0
Master_SSL_Allowed: No
Master_SSL_CA_File:
Master_SSL_CA_Path:
Master_SSL_Cert:
Master_SSL_Cipher:
Master_SSL_Key:
Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
Last_IO_Errno: 0
Last_IO_Error:
Last_SQL_Errno: 0
Last_SQL_Error:
Replicate_Ignore_Server_Ids:
Master_Server_Id: 1
Master_UUID: 212eeca3-3deb-11ec-9cb5-b4055d65b708
Master_Info_File: /var/lib/mysql/master.info
SQL_Delay: 0
SQL_Remaining_Delay: NULL
Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
Master_Retry_Count: 86400
Master_Bind:
Last_IO_Error_Timestamp:
Last_SQL_Error_Timestamp:
Master_SSL_Crl:
Master_SSL_Crlpath:
Retrieved_Gtid_Set:
Executed_Gtid_Set:
Auto_Position: 0
Replicate_Rewrite_DB:
Channel_Name:
Master_TLS_Version:
1 row in set (0.00 sec)
3. ## MySQL主从测试
[root@mysql-Master ~]# mysql -uroot -proot
mysql> create database test;
mysql> use test;
mysql> CREATE TABLE `t_test` (`id` int NOT NULL AUTO_INCREMENT ,`content` varchar(20) NULL ,PRIMARY KEY (`id`));
mysql> INSERT INTO `t_test` (`content`) VALUES ('test1'),('test2'),('test3'),('test4');
[root@mysql-Slave ~]# mysql -uroot -proot
mysql> show databases;
mysql> use test;
mysql> select * from t_test;
4. ## MySQL创建CDH用户及库
[root@mysql-Master ~]# mysql -uroot -proot
mysql>
CREATE DATABASE scm DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE amon DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE rman DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE hue DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE metastore DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE sentry DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE nav DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE navms DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE DATABASE oozie DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| amon |
| hue |
| metastore |
| mysql |
| nav |
| navms |
| oozie |
| performance_schema |
| rman |
| scm |
| sentry |
| sys |
+--------------------+
mysql>
create user hive identified by 'Hive@123';
grant all privileges on *.* to 'hive'@'localhost' identified by 'Hive@123' with grant option;
grant all privileges on *.* to 'hive'@'%' identified by 'Hive@123' with grant option;
create user scm identified by 'Scm@123';
grant all privileges on *.* to 'scm'@'localhost' identified by 'scm' with grant option;
grant all privileges on *.* to 'scm'@'%' identified by 'scm' with grant option;
create user amon identified by 'Amon@123';
grant all privileges on *.* to 'amon'@'localhost' identified by 'amon' with grant option;
grant all privileges on *.* to 'amon'@'%' identified by 'amon' with grant option;
create user rman identified by 'Rman@123';
grant all privileges on *.* to 'rman'@'localhost' identified by 'rman' with grant option;
grant all privileges on *.* to 'rman'@'%' identified by 'rman' with grant option;
create user hue identified by 'Hue@123';
grant all privileges on *.* to 'hue'@'localhost' identified by 'hue' with grant option;
grant all privileges on *.* to 'hue'@'%' identified by 'hue' with grant option;
create user sentry identified by 'Sentry@123';
grant all privileges on *.* to 'sentry'@'localhost' identified by 'sentry' with grant option;
grant all privileges on *.* to 'sentry'@'%' identified by 'sentry' with grant option;
create user nav identified by 'Nav@123';
grant all privileges on *.* to 'nav'@'localhost' identified by 'nav' with grant option;
grant all privileges on *.* to 'nav'@'%' identified by 'nav' with grant option;
create user navms identified by 'Navms@123';
grant all privileges on *.* to 'navms'@'localhost' identified by 'navms' with grant option;
grant all privileges on *.* to 'navms'@'%' identified by 'navms' with grant option;
create user oozie identified by 'Oozie@123';
grant all privileges on *.* to 'oozie'@'localhost' identified by 'oozie' with grant option;
grant all privileges on *.* to 'oozie'@'%' identified by 'oozie' with grant option;
select user,host from mysql.user;
+---------------+----------------+
| user | host |
+---------------+----------------+
| amon | % |
| hive | % |
| hue | % |
| nav | % |
| navms | % |
| oozie | % |
| rman | % |
| root | % |
| scm | % |
| sentry | % |
| root | 172.20.230.107 |
| amon | localhost |
| hive | localhost |
| hue | localhost |
| mysql.session | localhost |
| mysql.sys | localhost |
| nav | localhost |
| navms | localhost |
| oozie | localhost |
| rman | localhost |
| root | localhost |
| scm | localhost |
| sentry | localhost |
+---------------+----------------+
mysql> flush privileges;
5. ## 存放 MySQL JAR 包
[root@allnodes ~]# mkdir -p /usr/share/java
[root@allnodes ~]# scp root@172.20.179.23:/data/ibm/mysql/mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar /usr/share/java/mysql-connector-java.jar
6. # CM-Master部署:
1. ## 存放 CM 介质
[root@master ~]# mkdir -p /opt/cloudera-repo && mkdir -p /opt/parcel-repo
[root@master ~]# scp root@172.20.179.23:/data/CDH_ALL/6.3.2/cm/* /opt/cloudera-repo/
[root@master ~]# scp root@172.20.179.23:/data/CDH_ALL/6.3.2/parcels/6.3.2-parcels/* /opt/parcel-repo
[root@master ~]# yum install createrepo -y
[root@master ~]# createrepo /opt/cloudera-repo/ && createrepo /opt/parcel-repo
[root@master ~]# ll /opt/cloudera-repo/ && ll /opt/parcel-repo/
total 1380436
-rw-r--r-- 1 root root 14041 Nov 5 13:26 allkeys.asc
-rw-r--r-- 1 root root 10483568 Nov 5 13:26 cloudera-manager-agent-6.3.1-1466458.el7.x86_64.rpm
-rw-r--r-- 1 root root 1203832464 Nov 5 13:27 cloudera-manager-daemons-6.3.1-1466458.el7.x86_64.rpm
-rw-r--r-- 1 root root 11488 Nov 5 13:27 cloudera-manager-server-6.3.1-1466458.el7.x86_64.rpm
-rw-r--r-- 1 root root 10996 Nov 5 13:27 cloudera-manager-server-db-2-6.3.1-1466458.el7.x86_64.rpm
-rw-r--r-- 1 root root 14209868 Nov 5 13:27 enterprise-debuginfo-6.3.1-1466458.el7.x86_64.rpm
-rw-r--r-- 1 root root 184988341 Nov 5 13:27 oracle-j2sdk1.8-1.8.0+update181-1.x86_64.rpm
total 2033432
-rw-r----- 1 root root 2082186246 Nov 5 13:27 CDH-6.3.2-1.cdh6.3.2.p0.1605554-el7.parcel
-rw-r--r-- 1 root root 40 Nov 5 13:27 CDH-6.3.2-1.cdh6.3.2.p0.1605554-el7.parcel.sha1
-rw-r--r-- 1 root root 64 Nov 5 13:27 CDH-6.3.2-1.cdh6.3.2.p0.1605554-el7.parcel.sha256
-rw-r--r-- 1 root root 33887 Nov 5 13:27 manifest.json
2. ## 配置介质外发
[root@master ~]# yum install httpd -y
[root@master ~]# systemctl restart httpd && systemctl enable httpd && systemctl status httpd
[root@master ~]# cd /var/www/html/
[root@master ~]# ln -sn /opt/cloudera-repo && ln -sn /opt/parcel-repo && ll
total 0
lrwxrwxrwx 1 root root 18 Nov 5 13:30 cloudera-repo -> /opt/cloudera-repo
lrwxrwxrwx 1 root root 16 Nov 5 13:30 parcel-repo -> /opt/parcel-repo
[root@master ~]# curl http://172.20.230.105/cloudera-repo/
[root@master ~]# curl http://172.20.230.105/parcel-repo/
3. ## 安装CM-Master
[root@master ~]# rpm -ivh /opt/cloudera-repo/cloudera-manager-daemons-6.3.1-1466458.el7.x86_64.rpm && rpm -ivh /opt/cloudera-repo/cloudera-manager-server-6.3.1-1466458.el7.x86_64.rpm
[root@master ~]# /opt/cloudera/cm/schema/scm_prepare_database.sh mysql -h 172.20.230.106 --scm-host 172.20.230.106 scm scm scm
7. # 添加CM-Agent:
CM: http://172.20.230.105/cloudera-repo/
Parcels:http://172.20.230.105/parcel-repo/
点击下一步
-
输入 CDH 名称
点击下一步
-
输入节点 IP 地址
点击下一步
-
配置 CM 软件包
点击更多选项 输入:http://172.20.230.105/parcel-repo/
点击下一步
-
跳过 Oracle JDK 自动安装流程
点击下一步
-
手动输入节点 用户名密码
输入linux系统用户和密码
点击下一步
-
自动部署 CM 包及 CDH 包
-
安装及优化组件
添加相应的服务组件
-
CM 优化
Event Server 索引目录:/mnt/disk1/cloudera-scm-eventserver Host Monitor 存储目录:/mnt/disk1/cloudera-host-Monitor Service Monitor 存储目录:/mnt/disk1/cloudera-service-Monitor Activity Monitor 日志目录:/mnt/disk1/cloudera-scm-firehose Alert Publisher 日志目录:/mnt/disk1/cloudera-scm-alertpublisher Event Server 日志目录:/mnt/disk1/cloudera-scm-eventserver Host Monitor 日志目录:/mnt/disk1/cloudera-scm-firehose Server Monitor 日志目录:/mnt/disk1/cloudera-scm-firehose Service Monitor 的最大非JAVA内存:6GB 内存不足时的转储堆:关闭
-
ZooKeeper 优化
添加实例
修改相应的参数配置
数据目录:/mnt/disk1/lib/zookeeper 事务日志:/mnt/disk1/lib/zookeeper ZooKeeper日志目录:/mnt/disk1/log/zookeeper 最大客户端连接数:300 ZooKeeper Server 的 Java 堆栈大小:2GB 内存不足时的转储堆:关闭
-
HDFS 优化
添加实例
修改相应的配置参数
-
整体参数修改:
NameNode 数据目录:/mnt/[disk1-disk2]/dfs/dn SecondaryNode 数据目录:/mnt/[disk1-disk2]/dfs/snn Balancer 日志目录:/mnt/disk1/log/hadoop-hdfs Failover Controller 日志目录:/mnt/disk1/log/hadoop-hdfs HttpFS 日志目录:/mnt/disk1/log/hadoop-httpfs JournalNode 日志目录:/mnt/disk1/log/hadoop-hdfs NFS Gateway 日志目录:/mnt/disk1/log/hadoop-hdfs NameNode 日志目录:/mnt/disk1/log/hadoop-hdfs SecondaryNameNode 日志目录:/mnt/disk1/log/hadoop-hdfs DataNode 的 Java 堆栈大小:4GB JournalNode 的 Java 堆栈大小:1GB NameNode 的 Java 堆栈大小:8GB Secondary NameNode 的 Java 堆栈大小:8GB 内存不足时的转储堆:关闭 dfs.datanode.handler.count: 50 dfs.datanode.max.transfer.threads: 16384 最大进程文件描述符数:65535 dfs.namenode.handler.count: 35 dfs.namenode.service.handler.count: 35
-
主机组-DNG 48C-377GB-7.3TB*5:
接受的DataNode失败卷:2
DataNode 数据目录:/mnt/[disk1-disk5]/dfs/dn
-
主机组-DNG 64C-256GB-7.3TB*6:
接受的DataNode失败卷:3
DataNode 数据目录:/mnt/[disk1-disk6]/dfs/dn
-
YARN优化
添加实例
修改相应的配置参数
-
整体参数的修改
日志聚合保留期: 5 天 正在运行作业的历史位置: /mnt/disk1/log/hadoop/mapreduce/history NodeManager恢复目录:/mnt/disk1/hadoop-yarn/yarn-nm-history JobHistory Server日志目录:/mnt/disk1/log/hadoop-mapreduce NodeManager日志目录:/mnt/disk1/log/hadoop-yarn ResourceManager日志目录:/mnt/disk1/log/hadoop-yarn NodeManager的Java堆栈大小:2GB ResourceManager的Java堆栈大小:4GB
内存不足时的转储堆:关闭
-
主机组-NMG 48C-377GB-7.3TB*5
NodeManager本地目录:/mnt/[disk1-disk5]/yarn/nn NodeManager容器日志目录:/mnt/[disk1-disk5]/yarn/container-logs 容器内存:335GB 容器虚拟CPU内核:48
-
主机组-NMG 64C-256GB-7.3TB*6
NodeManager本地目录:/mnt/[disk1-disk6]/yarn/nn NodeManager容器日志目录:/mnt/[disk1-disk6]/yarn/container-logs 容器内存:200GB 容器虚拟CPU内核:64