Maven dependencies (pom.xml):

```xml
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.6.0</version>
    </dependency>
    <dependency>
        <groupId>commons-logging</groupId>
        <artifactId>commons-logging</artifactId>
        <version>1.2</version>
    </dependency>
</dependencies>
```
The Mapper:

```java
package hadoop.mapreduce;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author sunyong
 * @date 2020/07/01
 * @description
 * KEYIN:    input key type
 * VALUEIN:  input value type
 * KEYOUT:   output key type
 * VALUEOUT: output value type
 */
public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    Text k = new Text();
    IntWritable v = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // 1. Convert the input line to a String
        String line = value.toString();
        // 2. Split the line on whitespace
        String[] words = line.split("\\s+");
        // 3. Emit each word with a count of 1
        for (String word : words) {
            k.set(word);
            context.write(k, v);
        }
    }
}
```
The Reducer:

```java
package hadoop.mapreduce;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @author sunyong
 * @date 2020/07/01
 * @description
 * KEYIN:    reduce-side input key type, i.e. the map output key type
 * VALUEIN:  reduce-side input value type, i.e. the map output value type
 * KEYOUT:   reduce output key type
 * VALUEOUT: reduce output value type
 */
public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    IntWritable v = new IntWritable();
    int sum;

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // The reduce side receives input shaped roughly like (wish, (1, 1, 1, 1))
        // sum must be reset to 0 here, because reduce() is called once per key
        sum = 0;
        // Accumulate the counts from the iterable
        for (IntWritable count : values) {
            sum += count.get();
        }
        v.set(sum);
        // Write out the key and its total count
        context.write(key, v);
    }
}
```
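Because summing counts is both associative and commutative, this same reducer class can optionally be reused as a combiner to pre-aggregate counts on the map side before the shuffle. This is not part of the original driver below; a minimal sketch of the one extra line it would take:

```java
// Optional (not in the original driver): reuse WCReducer as a combiner
// to pre-aggregate counts on the map side. Safe here because summing
// IntWritables is associative and commutative.
job.setCombinerClass(WCReducer.class);
```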
The Driver:

```java
package hadoop.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @author sunyong
 * @date 2020/07/01
 * @description
 */
public class WCDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Create the configuration and the Job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "wordcount");
        // 2. Set the jar by class; the argument is ThisClass.class
        job.setJarByClass(WCDriver.class);
        // 3. Set the mapper and reducer classes
        job.setMapperClass(WCMapper.class);
        job.setReducerClass(WCReducer.class);
        // 4. Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Set the reduce output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Set the input and output paths; the input is a local txt file,
        //    and the job writes its results to a "test" directory
        FileInputFormat.setInputPaths(job, new Path("F:\\sunyong\\Java\\codes\\javaToHdfs\\download\\a.txt"));
        FileOutputFormat.setOutputPath(job, new Path("test"));
        // 7. Submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
```
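One practical note: FileOutputFormat refuses to start if the output directory already exists, so re-running the job fails until the `test` directory is deleted. A minimal sketch of removing it programmatically with the standard FileSystem API, placed before step 6 (assuming the same `conf` and `"test"` path as in the driver above, plus an import of org.apache.hadoop.fs.FileSystem):

```java
// Sketch: delete a stale output directory so the job can be re-run,
// since FileOutputFormat fails if the output path already exists.
FileSystem fs = FileSystem.get(conf);
Path output = new Path("test");
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}
```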
To run the job on a cluster instead, change step 6 so the paths come from the command-line arguments:

```java
// 6. Set the input and output paths from the command-line arguments
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
```
Create an input directory on HDFS and upload the test file:

```sh
hdfs dfs -mkdir /input
hdfs dfs -put /tmp/test.txt /input/
```
Run the jar on the cluster, passing the input file and output directory as arguments:

```sh
hadoop jar /opt/install/hadoop/lib/javaToHdfs.jar hadoop.mapreduce.WCDriver /input/test.txt /output
```
View the results:

```sh
hdfs dfs -text /output/part-*
```
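Each output line holds a word and its count, separated by a tab (the default TextOutputFormat separator), with keys in sorted order. For example, if test.txt contained the hypothetical line `wish you wish well`, the result would look like:

```
well	1
wish	2
you	1
```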
Reprinted from: http://gdjxi.baihongyu.com/