IP, hostname, firewalld, passwordless SSH login, and JDK configuration
1.IP configuration
vi /etc/sysconfig/network-scripts/ifcfg-ens33
BOOTPROTO=static
ONBOOT=yes # make sure this is set so the interface comes up at boot
IPADDR=192.168.188.100 # static IP for this node
GATEWAY=192.168.188.2 # gateway IP
DNS1=192.168.188.2 # DNS server (the gateway here)
service network restart # apply the new settings
ip addr # verify the address took effect
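A quick connectivity check after the restart, using the gateway address from the config above:
ping -c 3 192.168.188.2 # the gateway should answer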
2.hostname
vi /etc/hostname
bigdata01
hostname # verify; the /etc/hostname edit itself only applies after a reboot
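On CentOS 7, hostnamectl applies the new name immediately instead of waiting for a reboot. The nodes also need to resolve each other's names, e.g. via /etc/hosts (the addresses for bigdata02 and bigdata03 are assumptions following the .100 pattern above):
hostnamectl set-hostname bigdata01
vi /etc/hosts
192.168.188.100 bigdata01
192.168.188.101 bigdata02
192.168.188.102 bigdata03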
3.firewalld
systemctl stop firewalld # stop the firewall now
systemctl disable firewalld # keep it off across reboots
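A quick confirmation that the firewall is stopped and will stay off:
systemctl status firewalld # should report inactive (dead)
systemctl is-enabled firewalld # should report disabled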
4.Passwordless SSH login
ssh-keygen -t rsa
# press Enter at every prompt to accept the defaults
ll ~/.ssh
# append the public key to authorized_keys so this host accepts its own key
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
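start-all.sh later logs into the workers over SSH, so the key must also be trusted on bigdata02 and bigdata03. A minimal sketch, assuming the workers are reachable and their root passwords are known:
ssh-copy-id bigdata02
ssh-copy-id bigdata03
# verify: both should log in without a password prompt
ssh bigdata02 exit
ssh bigdata03 exit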
5.JDK configuration
vi /etc/profile
export JAVA_HOME=/data/soft/jdk1.8
export PATH=.:$JAVA_HOME/bin:$PATH
source /etc/profile
java -version
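A quick sanity check that the variables took effect in the current shell:
echo $JAVA_HOME # should print /data/soft/jdk1.8
which java # should point into $JAVA_HOME/bin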
6.Hadoop configuration files: hadoop-env.sh, core-site.xml, hdfs-site.xml, mapred-site.xml, yarn-site.xml, workers (all edited under /data/soft/hadoop-3.2.0/etc/hadoop)
1.hadoop-env.sh
vi hadoop-env.sh
export JAVA_HOME=/data/soft/jdk1.8
export HADOOP_LOG_DIR=/data/hadoop_repo/logs/hadoop # keep daemon logs under the data directory
2.core-site.xml
vi core-site.xml
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://bigdata01:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/data/hadoop_repo</value>
</property>
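These <property> blocks, here and in the files below, go inside the file's existing <configuration> element; for reference, the complete core-site.xml looks like this:
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://bigdata01:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/data/hadoop_repo</value>
    </property>
</configuration>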
3.hdfs-site.xml
vi hdfs-site.xml
<property>
    <!-- two replicas: one on each of the two worker nodes -->
    <name>dfs.replication</name>
    <value>2</value>
</property>
<property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>bigdata01:50090</value>
</property>
4.mapred-site.xml
vi mapred-site.xml
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
5.yarn-site.xml
vi yarn-site.xml
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <!-- environment variables the NodeManager passes through to containers; needed for MR jobs -->
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>bigdata01</value>
</property>
6.workers
vi workers
# list the hosts that will run the DataNode and NodeManager daemons
bigdata02
bigdata03
7.Modify the startup scripts start-dfs.sh, stop-dfs.sh, start-yarn.sh, stop-yarn.sh
1.start-dfs.sh
cd ../../sbin # from etc/hadoop over to hadoop-3.2.0/sbin
vi start-dfs.sh
# add these lines near the top, above the existing "# Start hadoop dfs daemons." comment
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
2.stop-dfs.sh
vi stop-dfs.sh
HDFS_DATANODE_USER=root
HDFS_DATANODE_SECURE_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
3.start-yarn.sh
vi start-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
4.stop-yarn.sh
vi stop-yarn.sh
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
cd /data/soft
# distribute the configured Hadoop to the worker nodes
scp -rq hadoop-3.2.0 bigdata02:/data/soft
scp -rq hadoop-3.2.0 bigdata03:/data/soft
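The workers need Java as well. If the JDK is not on bigdata02 and bigdata03 yet, a minimal sketch using the same paths as on bigdata01 (this overwrites /etc/profile on the workers, so it assumes they have no local changes there):
scp -rq /data/soft/jdk1.8 bigdata02:/data/soft
scp -rq /data/soft/jdk1.8 bigdata03:/data/soft
scp /etc/profile bigdata02:/etc/profile
scp /etc/profile bigdata03:/etc/profile
# verify via the absolute path, since a non-interactive shell may not load /etc/profile
ssh bigdata02 /data/soft/jdk1.8/bin/java -version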
cd hadoop-3.2.0
# format the NameNode; do this only once, on first setup
bin/hdfs namenode -format
sbin/start-all.sh
jps
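If everything came up, jps on bigdata01 should show NameNode, SecondaryNameNode, and ResourceManager, while each worker runs DataNode and NodeManager:
ssh bigdata02 /data/soft/jdk1.8/bin/jps
ssh bigdata03 /data/soft/jdk1.8/bin/jps
The web UIs are another sanity check (Hadoop 3.x defaults): the NameNode at http://bigdata01:9870 and YARN at http://bigdata01:8088.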