Overview
Implementation plan for the custom OutputFormat exercise (it takes effect after the Reduce phase):
- Per the requirement, lines from an input file that contain the word "hadoop" go into one output file, and lines that do not contain "hadoop" go into another (see the sample input/output after this list).
- First, in the Mapper, write out each key/value pair unchanged, without any processing.
- Then implement a custom OutputFormat whose getRecordWriter method returns our own writer class; that class must extend RecordWriter.
- In the custom RecordWriter, declare two output streams (FSDataOutputStream): one for writing lines that contain "hadoop" and one for lines that do not, and initialize both streams in the constructor.
- Put the actual filtering and writing logic in the write method.
- Finally, register the custom OutputFormat in the Driver.
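For illustration, suppose the input file log.txt contained the following lines (hypothetical sample data, not from the original exercise):

hadoop spark flink
java scala
learn hadoop mapreduce

After the job runs, hadoop.txt would hold the first and third lines, and other.log the second.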
Custom Mapper:
package com.mr.output1;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @author kate
 */
public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // No per-record logic is needed for this requirement;
        // emit the line text as the key, unchanged
        context.write(value, NullWritable.get());
    }
}
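Note the key swap: the whole line of text becomes the output key, with NullWritable as a placeholder value. One side effect to be aware of is that identical lines collapse into the same key during the shuffle, so the reducer sees each distinct line once and duplicate input lines are deduplicated in the final output.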
Custom Reducer:
package com.mr.output1;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @author kate
 */
public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

    Text k = new Text();

    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // 1. Get the line (it arrives as the key)
        String line = key.toString();
        // 2. Append a line terminator so each record lands on its own line in the output file
        line = line + "\r\n";
        // 3. Set the key
        k.set(line);
        // 4. Write out
        context.write(k, NullWritable.get());
    }
}
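A small design note on step 2: "\r\n" hard-codes Windows-style line endings. A minimal, platform-neutral alternative (a sketch of an optional change, not part of the original code):

// Append the current platform's line separator instead of a hard-coded "\r\n"
line = line + System.lineSeparator();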
Custom OutputFormat class:
package com.mr.output1;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {

    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        // Hand back our custom RecordWriter
        return new FilterRecordWriter(job);
    }
}
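Even though our RecordWriter ignores it, extending FileOutputFormat means the Driver must still configure an output path: FileOutputFormat.checkOutputSpecs rejects the job if the path is missing or already exists, and the job's _SUCCESS marker is written there on completion.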
Custom RecordWriter class:
package com.mr.output1;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

import java.io.IOException;

/**
 * @author kate
 */
public class FilterRecordWriter extends RecordWriter<Text, NullWritable> {

    FSDataOutputStream hadoopOut = null;
    FSDataOutputStream otherOut = null;

    public FilterRecordWriter(TaskAttemptContext job) {
        // 1. Get the file system
        FileSystem fs;
        try {
            fs = FileSystem.get(job.getConfiguration());
            // 2. Build the two output file paths
            Path hadoopPath = new Path("F:/mapreduce/src/main/resources/output/hadoop.txt");
            Path otherPath = new Path("F:/mapreduce/src/main/resources/output/other.log");
            // 3. Open the output streams
            hadoopOut = fs.create(hadoopPath);
            otherOut = fs.create(otherPath);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void write(Text key, NullWritable value) throws IOException, InterruptedException {
        // Route each line to a different file depending on whether it contains "hadoop"
        if (key.toString().contains("hadoop")) {
            hadoopOut.write(key.toString().getBytes());
        } else {
            otherOut.write(key.toString().getBytes());
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        // Release the stream resources
        IOUtils.closeStream(hadoopOut);
        IOUtils.closeStream(otherOut);
    }
}
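The constructor above hard-codes absolute local paths, which ties the writer to one machine. As a minimal sketch of an alternative (an assumption on my part, not the author's code), the paths can instead be derived from the output directory the Driver already configures, via the standard FileOutputFormat.getOutputPath helper:

// Sketch: replace step 2 of the constructor with paths derived from the
// job's configured output directory (requires importing
// org.apache.hadoop.mapreduce.lib.output.FileOutputFormat)
Path outDir = FileOutputFormat.getOutputPath(job);
Path hadoopPath = new Path(outDir, "hadoop.txt");
Path otherPath = new Path(outDir, "other.log");

Also note that getRecordWriter is invoked once per reduce task, so running with more than one reducer would make the fixed file names collide; embedding the task attempt ID (job.getTaskAttemptID()) in the names is one way to keep them unique.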
Custom Driver class:
package com.mr.output1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * @author kate
 */
public class FilterDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(FilterDriver.class);
        job.setMapperClass(FilterMapper.class);
        job.setReducerClass(FilterReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        // Register the custom OutputFormat with the job
        job.setOutputFormatClass(FilterOutputFormat.class);

        // Set the input file and the output directory
        FileInputFormat.setInputPaths(job, new Path("F:/mapreduce/src/main/resources/input/log.txt"));
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("F:/mapreduce/src/main/resources/output1");
        // The output directory must not already exist or the job fails, so delete it first if present
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        FileOutputFormat.setOutputPath(job, path);

        // Submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
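After a successful local run, the filtered data lands in the two files FilterRecordWriter creates (hadoop.txt and other.log), while the configured output directory output1 typically contains only the framework's _SUCCESS marker, because the custom writer bypasses the default part-r-xxxxx files.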