WordCount

The WordCount (WC) program under MapReduce

Mapper

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * KEYIN:    type of the map-stage input key, here LongWritable (byte offset of the line)
 * VALUEIN:  type of the map-stage input value, here Text (the line itself)
 * KEYOUT:   type of the map-stage output key, here Text (a word)
 * VALUEOUT: type of the map-stage output value, here IntWritable (the count 1)
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    // Reused across map() calls; context.write serializes the pair immediately,
    // so reusing Writable instances avoids per-record allocations
    private Text outK = new Text();
    private IntWritable outV = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read one line, e.g. "root root"
        String line = value.toString();
        // 2. Split it into words: "root", "root"
        String[] words = line.split(" ");

        // 3. Emit (word, 1) for every word
        for (String word : words) {
            // Wrap the word in the reusable key
            outK.set(word);
            // Write the pair out
            context.write(outK, outV);
        }
    }
}

Reducer

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * KEYIN:    type of the reduce-stage input key, here Text (a word)
 * VALUEIN:  type of the reduce-stage input value, here IntWritable (a count)
 * KEYOUT:   type of the reduce-stage output key, here Text
 * VALUEOUT: type of the reduce-stage output value, here IntWritable (the total)
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    // Reused across reduce() calls
    private IntWritable outV = new IntWritable();
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // e.g. key "root" arrives with values (1, 1), key "ss" with (1, 1)
        // Sum the counts for this word
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        }

        outV.set(sum);
        // Write out (word, total)
        context.write(key, outV);
    }
}
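
Because the reduce logic is a plain sum, which is commutative and associative, the same WordCountReducer can also serve as a combiner to pre-aggregate map output and shrink shuffle traffic. A minimal, optional sketch (one extra line for the driver shown next; not part of the original code):

// Optional, in the driver: run the reducer map-side as a combiner.
// Safe here because summing counts is commutative and associative.
job.setCombinerClass(WordCountReducer.class);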

Driver

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.IOException;

public class WordCountDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Get the Job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 2. Set the jar by locating the driver class
        job.setJarByClass(WordCountDriver.class);
        // 3. Wire up the Mapper and Reducer
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths
        //    (backslashes must be escaped in Java string literals)
        FileInputFormat.setInputPaths(job, new Path("D:\\HadoopProject\\MapReduceDemo\\input"));
        // The output directory must not exist yet
        FileOutputFormat.setOutputPath(job, new Path("D:\\HadoopProject\\MapReduceDemo\\output"));
        // 7. Submit the job and exit with its status
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);

    }
}
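
The hard-coded Windows paths only work for a local run from the IDE. For submitting the packaged jar to a cluster, a common sketch (assuming the input and output paths are passed in as program arguments) replaces step 6 with:

// In step 6 of the driver, read the paths from the command line,
// e.g. hadoop jar wc.jar WordCountDriver /input /output (names illustrative)
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));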

The WordCount program under Spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Spark01_WordCount {

  def main(args: Array[String]): Unit = {

    // TODO Establish the connection between the application and the Spark framework,
    //      much like opening a JDBC Connection to a database
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)

    // TODO Run the business logic

    // 1. Read the file(s), yielding the data line by line,
    //    e.g. "hello world"
    val lines: RDD[String] = sc.textFile("datas/*")

    // 2. Split each line into individual words (tokenization);
    //    flatMap flattens the per-line arrays into one RDD of words:
    //    "hello world" => hello, world
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // 3. Group the words by the word itself so they can be counted:
    //    (hello, (hello, hello, hello)), (world, (world, world))
    val wordGroup: RDD[(String, Iterable[String])] = words.groupBy(word => word)

    // 4. Transform each group into (word, count):
    //    (hello, (hello, hello, hello)) => (hello, 3)
    //    (world, (world, world))        => (world, 2)
    val wordToCount = wordGroup.map {
      case (word, list) => (word, list.size)
    }

    // 5. Collect the results back to the driver and print them
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)

    // TODO Close the connection
    sc.stop()

  }
}
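
The groupBy approach materializes the full list of occurrences for every word before counting. A more idiomatic sketch (same output, assuming the same datas/* input) replaces steps 3 and 4 with reduceByKey, which sums counts within each partition before the shuffle:

// Map each word to (word, 1) and sum per key;
// reduceByKey pre-aggregates partition-locally, shrinking the shuffle
val wordToCount: RDD[(String, Int)] = words
  .map(word => (word, 1))
  .reduceByKey(_ + _)
wordToCount.collect().foreach(println)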