Setting up a Hadoop cluster on Linux


1. Create xsync and jpsall

mkdir -p /home/atguigu/bin

vim xsync

#!/bin/bash

# 1. Check the number of arguments
if [ $# -lt 1 ]; then
    echo "Not Enough Arguments!"
    exit 1
fi

# 2. Iterate over every machine in the cluster
for host in hadoop102 hadoop103 hadoop104; do
    echo "==================== $host ===================="

    # 3. Iterate over all files/directories and send them one by one
    for file in "$@"; do
        # 4. Check whether the file exists
        if [ -e "$file" ]; then
            # 5. Get the parent directory
            pdir=$(cd -P "$(dirname "$file")"; pwd)
            # 6. Get the file name
            fname=$(basename "$file")

            ssh "$host" "mkdir -p $pdir"
            rsync -av "$pdir/$fname" "$host:$pdir"
        else
            echo "$file does not exist!"
        fi
    done
done
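
For xsync to be callable from anywhere, it needs execute permission, and /home/atguigu/bin has to be on the PATH (a user's own bin directory is typically picked up via ~/.bash_profile after re-login). A minimal check:

chmod +x /home/atguigu/bin/xsync
echo $PATH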

vim jpsall

#!/bin/bash

for host in hadoop102 hadoop103 hadoop104; do
    echo "=============== $host ==============="
    ssh "$host" jps
done
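
Like xsync, jpsall needs execute permission; it can then be pushed to the other nodes with the xsync script defined above so it is available on every machine:

chmod +x /home/atguigu/bin/jpsall
xsync /home/atguigu/bin/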

2. Edit /etc/profile.d/my_env.sh

#JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_212
export PATH=$PATH:$JAVA_HOME/bin
#HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
#KAFKA_HOME
export KAFKA_HOME=/opt/module/kafka
export PATH=$PATH:$KAFKA_HOME/bin

source /etc/profile.d/my_env.sh
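
After sourcing the file, the new environment variables can be verified (the exact version output depends on what is installed):

java -version
hadoop version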

3. Change file ownership

On hadoop102, hadoop103, and hadoop104, /opt/module has already been created, and its ownership has been changed to atguigu:atguigu.

On hadoop102: sudo chown atguigu:atguigu -R /opt/module

scp -r /opt/module/jdk1.8.0_212 atguigu@hadoop103:/opt/module

scp -r /opt/module/jdk1.8.0_212 atguigu@hadoop104:/opt/module
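
Assuming hadoop-3.1.3 has already been unpacked under /opt/module on hadoop102, it can be copied to the other two nodes the same way:

scp -r /opt/module/hadoop-3.1.3 atguigu@hadoop103:/opt/module
scp -r /opt/module/hadoop-3.1.3 atguigu@hadoop104:/opt/module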

4. Edit the Hadoop configuration files

cd $HADOOP_HOME/etc/hadoop

Edit core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- NameNode address -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop102:8020</value>
    </property>
    <!-- Hadoop data storage directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop-3.1.3/data</value>
    </property>
    <!-- Static user for the HDFS web UI, set to atguigu -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>atguigu</value>
    </property>
</configuration>

Edit hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- NameNode web UI address -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>hadoop102:9870</value>
    </property>
    <!-- SecondaryNameNode web UI address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop104:9868</value>
    </property>
</configuration>

Edit yarn-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- Make MapReduce use the shuffle auxiliary service -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- ResourceManager address -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop103</value>
    </property>
    <!-- Environment variables inherited by containers -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Enable log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- Log server address -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://hadoop102:19888/jobhistory/logs</value>
    </property>
    <!-- Keep aggregated logs for 7 days -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <!-- Whether to run a thread that checks each task's physical memory usage and kills tasks that exceed their allocation; defaults to true -->
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- Whether to run a thread that checks each task's virtual memory usage and kills tasks that exceed their allocation; defaults to true -->
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>

Edit mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- Run MapReduce jobs on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- JobHistory server address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop102:10020</value>
    </property>
    <!-- JobHistory web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop102:19888</value>
    </property>
</configuration>

Sync the configuration files with xsync

xsync /opt/module/hadoop-3.1.3/etc/hadoop/

Edit workers and add the following content

hadoop102
hadoop103
hadoop104

Note: the entries added to this file must not have trailing spaces, and the file must not contain blank lines.
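
Since workers is edited after the xsync above, it needs to be distributed again. A quick way to spot trailing spaces or blank lines (cat -A marks every line ending with $) and then re-sync:

cat -A $HADOOP_HOME/etc/hadoop/workers
xsync $HADOOP_HOME/etc/hadoop/workers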

5. Write a Hadoop cluster management script

#!/bin/bash

if [ $# -lt 1 ]; then
    echo "No Args Input..."
    exit
fi

case $1 in
    "start")
        echo " =================== Starting the Hadoop cluster ==================="
        echo " --------------- Starting HDFS ---------------"
        ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/start-dfs.sh"
        echo " --------------- Starting YARN ---------------"
        ssh hadoop103 "/opt/module/hadoop-3.1.3/sbin/start-yarn.sh"
        echo " --------------- Starting historyserver ---------------"
        ssh hadoop102 "/opt/module/hadoop-3.1.3/bin/mapred --daemon start historyserver"
        ;;
    "stop")
        echo " =================== Stopping the Hadoop cluster ==================="
        echo " --------------- Stopping historyserver ---------------"
        ssh hadoop102 "/opt/module/hadoop-3.1.3/bin/mapred --daemon stop historyserver"
        echo " --------------- Stopping YARN ---------------"
        ssh hadoop103 "/opt/module/hadoop-3.1.3/sbin/stop-yarn.sh"
        echo " --------------- Stopping HDFS ---------------"
        ssh hadoop102 "/opt/module/hadoop-3.1.3/sbin/stop-dfs.sh"
        ;;
    *)
        echo "Input Args Error..."
        ;;
esac
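
A minimal usage sketch, assuming the script is saved as myhadoop.sh (the file name is an assumption) in /home/atguigu/bin; before the very first start, HDFS needs a formatted NameNode on hadoop102:

chmod +x /home/atguigu/bin/myhadoop.sh    # myhadoop.sh is an assumed file name
xsync /home/atguigu/bin/                  # distribute the scripts to all nodes
ssh hadoop102 "hdfs namenode -format"     # only before the first start, on hadoop102
myhadoop.sh start                         # start HDFS, YARN, and the history server
jpsall                                    # check the Java processes on every node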