- core-site.xml
<configuration>
<!-- Address of the NameNode in HDFS -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop100:8020</value>
</property>
<!-- Storage directory for files Hadoop generates at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.7.2/data/tmp</value>
</property>
</configuration>
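A quick sanity check that the value is picked up (hdfs getconf ships with Hadoop 2.x; this assumes commands are run from /opt/module/hadoop-2.7.2):
bin/hdfs getconf -confKey fs.defaultFS
# expected output: hdfs://hadoop100:8020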
- hadoop-env.sh
export JAVA_HOME=/opt/module/jdk1.7.0_79
- hdfs-site.xml
<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop102:50090</value>
</property>
</configuration>
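The same check works for hdfs-site.xml (same working-directory assumption as above):
bin/hdfs getconf -confKey dfs.replication
# expected output: 3
bin/hdfs getconf -secondaryNameNodes
# expected output: hadoop102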
- slaves
hadoop100
hadoop101
hadoop102
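The slaves file must contain only hostnames, with no trailing spaces and no blank lines, or the start scripts will try to ssh to an empty host. One way to push the finished config to the other nodes, assuming passwordless ssh and the same directory layout on every machine:
for host in hadoop101 hadoop102; do
scp -r /opt/module/hadoop-2.7.2/etc/hadoop $host:/opt/module/hadoop-2.7.2/etc/
done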
- yarn-env.sh
export JAVA_HOME=/opt/module/jdk1.7.0_79
- yarn-site.xml
<configuration>
<!-- How reducers obtain data (the shuffle service) -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Hostname of the YARN ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop101</value>
</property>
</configuration>
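There is no getconf equivalent for YARN settings; once YARN is running (see the start steps below), one sketch of a check is to ask the ResourceManager for its registered NodeManagers:
bin/yarn node -list
# run from any node; should reach the ResourceManager on hadoop101 and list three NodeManagers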
- mapred-env.sh
export JAVA_HOME=/opt/module/jdk1.7.0_79
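JAVA_HOME must be hardcoded in all three env files because the daemons are launched over ssh, which does not source the login profile. A one-line check, run from /opt/module/hadoop-2.7.2:
grep -n '^export JAVA_HOME' etc/hadoop/hadoop-env.sh etc/hadoop/yarn-env.sh etc/hadoop/mapred-env.sh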
- mapred-site.xml
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
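The stock 2.7.2 tarball ships only a template for this file; if mapred-site.xml does not exist yet, create it from the template first:
cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml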
- Format & start HDFS
On hadoop100: bin/hdfs namenode -format
On hadoop100: sbin/start-dfs.sh
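Format only once: re-running namenode -format generates a new clusterID, and the existing DataNodes will not rejoin until the data/tmp and logs directories are wiped. After start-dfs.sh, jps on each node should show, given this configuration:
hadoop100: NameNode, DataNode
hadoop101: DataNode
hadoop102: DataNode, SecondaryNameNode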
- Start YARN
On hadoop101: sbin/start-yarn.sh
Note: if the NameNode and the ResourceManager are not on the same machine, YARN must not be started on the NameNode host; start it on the machine where the ResourceManager runs (hadoop101 here).
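After start-yarn.sh, jps should additionally show ResourceManager and NodeManager on hadoop101, and NodeManager on hadoop100 and hadoop102. The standard Hadoop 2.x web UIs are http://hadoop100:50070 for the NameNode and http://hadoop101:8088 for the ResourceManager.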