- 配置hadoop-env.sh
export JAVA_HOME=/opt/module/jdk1.7.0_79
- 配置core-site.xml
<!-- 指定HDFS中NameNode的地址 -->
<property>
<name>fs.defaultFS</name> <value>hdfs://hadoop101:8020</value></property>
<!-- 指定hadoop运行时产生文件的存储目录 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.7.2/data/tmp</value>
</property>
- 配置hdfs-site.xml
<!-- 指定HDFS副本的数量 -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
- 格式化namenode(第一次启动时格式化,以后就不要总格式化)
bin/hdfs namenode -format
- 启动namenode
sbin/hadoop-daemon.sh start namenode
- 启动DataNode
sbin/hadoop-daemon.sh start datanode
- 查看是否启动成功
jps
- web端查看HDFS文件系统
http://192.168.1.101:50070/dfshealth.html
注意:如果不能查看,看如下帖子处理
http://www.cnblogs.com/zlslch/p/6604189.html
- 在hdfs文件系统上创建一个input文件夹
bin/hdfs dfs -mkdir -p /user/wangxiaofan/mapreduce/wordcount/input
- 将测试文件内容上传到文件系统上
bin/hdfs dfs -put wcinput/wc.input /user/wangxiaofan/mapreduce/wordcount/input/
- 查看上传的文件是否正确
bin/hdfs dfs -ls /user/wangxiaofan/mapreduce/wordcount/input/
bin/hdfs dfs -cat /user/wangxiaofan/mapreduce/wordcount/input/wc.input
- 在Hdfs上运行mapreduce程序
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.2.jar wordcount /user/wangxiaofan/mapreduce/wordcount/input/ /user/wangxiaofan/mapreduce/wordcount/output
- 查看输出结果
bin/hdfs dfs -cat /user/wangxiaofan/mapreduce/wordcount/output/*
- 将测试文件内容下载到本地
bin/hdfs dfs -get /user/wangxiaofan/mapreduce/wordcount/output/part-r-00000 ./wcoutput/
- 删除输出结果
bin/hdfs dfs -rm -r /user/wangxiaofan/mapreduce/wordcount/output