Overview
Simple Parsing Version
1) Requirement:
Remove log records that contain 11 or fewer fields.
2) Input data
The data set is rather large.
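The raw data is not reproduced here. To make the filter rule concrete, the sketch below splits one hypothetical nginx-style access-log line on spaces and counts its fields; the sample line is invented for illustration and is not taken from the real data set.

public class FieldCountDemo {
	public static void main(String[] args) {
		// Hypothetical web-log line; note that quoted sections also split on spaces
		String line = "58.177.135.108 - - [19/Sep/2013:05:40:56 +0000] "
				+ "\"GET /images/my.jpg HTTP/1.1\" 200 19939 "
				+ "\"http://www.example.com/\" \"Mozilla/5.0\"";
		String[] fields = line.split(" ");
		// This line splits into 12 fields, so it passes the length > 11 check
		System.out.println(fields.length + " fields -> "
				+ (fields.length > 11 ? "keep" : "drop"));
	}
}

A line with 11 or fewer space-separated tokens fails the check and is dropped by the mapper below.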
3) Implementation:
(1) Write LogMapper
package com.atguigu.mapreduce.weblog;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class LogMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

	Text k = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {

		// 1 Get one line of input
		String line = value.toString();

		// 2 Parse the log line
		boolean result = parseLog(line, context);

		// 3 Return early if the record is invalid
		if (!result) {
			return;
		}

		// 4 Set the key
		k.set(line);

		// 5 Write out the record
		context.write(k, NullWritable.get());
	}

	// Parse one log line
	private boolean parseLog(String line, Context context) {

		// 1 Split the line on spaces
		String[] fields = line.split(" ");

		// 2 Records with more than 11 fields are valid
		if (fields.length > 11) {
			// Custom counter for valid records
			context.getCounter("map", "true").increment(1);
			return true;
		} else {
			// Custom counter for invalid records
			context.getCounter("map", "false").increment(1);
			return false;
		}
	}
}
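Note that the mapper writes the whole line as the key with a NullWritable value: the job only filters records, so no aggregation is needed and, as the driver below shows, the reduce phase is skipped entirely (with zero reduce tasks, mapper output is written straight to files named part-m-XXXXX).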
(2) Write LogDriver
package com.atguigu.mapreduce.weblog;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LogDriver {

	public static void main(String[] args) throws Exception {

		// Input and output paths, hardcoded for local testing
		args = new String[] { "e:/input/inputlog", "e:/output1" };

		// 1 Get the job instance
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);

		// 2 Set the jar
		job.setJarByClass(LogDriver.class);

		// 3 Attach the mapper
		job.setMapperClass(LogMapper.class);

		// 4 Set the final output types
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(NullWritable.class);

		// Set the number of reduce tasks to 0 (map-only job)
		job.setNumReduceTasks(0);

		// 5 Set the input and output paths
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// 6 Submit the job
		job.waitForCompletion(true);
	}
}
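The two counters incremented in parseLog appear in the job's counter summary once it finishes; they can also be read back in the driver. A minimal sketch, assuming it is appended after waitForCompletion(true) in main above (the printed labels are illustrative):

// Requires: import org.apache.hadoop.mapreduce.Counters;
Counters counters = job.getCounters();
long kept = counters.findCounter("map", "true").getValue();
long dropped = counters.findCounter("map", "false").getValue();
System.out.println("kept=" + kept + ", dropped=" + dropped);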
Complex Parsing Version
1) Requirements:
Identify and split each field in the web access log.
Remove invalid records from the log.
Generate the various filtered access-request data sets needed for the statistics.
2) Input data
The data set is rather large.
3) Implementation:
(1) Define a bean to hold each data field of a log record
package com.atguigu.mapreduce.log;
public class LogBean {
	private String remote_addr;      // Client IP address
	private String remote_user;      // Client user name; "-" when absent
	private String time_local;       // Access time and time zone
	private String request;          // Requested URL and HTTP protocol
	private String status;           // Request status; 200 on success
	private String body_bytes_sent;  // Size of the body content sent to the client
	private String http_referer;     // Page the request was linked from
	private String http_user_agent;  // Client browser information

	private boolean valid = true;    // Whether the record is valid
public String getRemote_addr() { return remote_addr; }
public void setRemote_addr(String remote_addr) { this.remote_addr = remote_addr; }
public String getRemote_user() { return remote_user; }
public void setRemote_user(String remote_user) { this.remote_user = remote_user; }
public String getTime_local() { return time_local; }
public void setTime_local(String time_local) { this.time_local = time_local; }
public String getRequest() { return request; }
public void setRequest(String request) { this.request = request; }
public String getStatus() { return status; }
public void setStatus(String status) { this.status = status; }
public String getBody_bytes_sent() { return body_bytes_sent; }
public void setBody_bytes_sent(String body_bytes_sent) { this.body_bytes_sent = body_bytes_sent; }
public String getHttp_referer() { return http_referer; }
public void setHttp_referer(String http_referer) { this.http_referer = http_referer; }
public String getHttp_user_agent() { return http_user_agent; }
public void setHttp_user_agent(String http_user_agent) { this.http_user_agent = http_user_agent; }
public boolean isValid() { return valid; }
public void setValid(boolean valid) { this.valid = valid; }
	@Override
	public String toString() {
		StringBuilder sb = new StringBuilder();
		sb.append(this.valid);
		// The source listing is truncated at this point; the remaining fields
		// are assumed to be appended with a "\001" separator
		sb.append("\001").append(this.remote_addr);
		sb.append("\001").append(this.remote_user);
		sb.append("\001").append(this.time_local);
		sb.append("\001").append(this.request);
		sb.append("\001").append(this.status);
		sb.append("\001").append(this.body_bytes_sent);
		sb.append("\001").append(this.http_referer);
		sb.append("\001").append(this.http_user_agent);
		return sb.toString();
	}
}
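To connect this bean to the field-splitting requirement above, a rough sketch of a parsing helper is shown below. The field positions and the helper name parseLine are assumptions for illustration (an nginx-style space-separated layout), not the document's actual parsing code; the more-than-11-fields validity rule from the simple version is reused.

// A minimal parsing sketch; field positions are assumed, and the real
// parser for this example may differ
public static LogBean parseLine(String line) {
	LogBean bean = new LogBean();
	String[] fields = line.split(" ");
	if (fields.length > 11) {
		bean.setRemote_addr(fields[0]);             // client IP
		bean.setRemote_user(fields[2]);             // user name ("-" if absent)
		bean.setTime_local(fields[3].substring(1)); // strip the leading "["
		bean.setRequest(fields[6]);                 // requested URL
		bean.setStatus(fields[8]);                  // HTTP status code
		bean.setBody_bytes_sent(fields[9]);         // response body size
		bean.setHttp_referer(fields[10]);           // referer (still quoted)
		// Everything after the referer belongs to the user agent
		StringBuilder ua = new StringBuilder();
		for (int i = 11; i < fields.length; i++) {
			ua.append(fields[i]).append(" ");
		}
		bean.setHttp_user_agent(ua.toString().trim());
		bean.setValid(true);
	} else {
		bean.setValid(false);
	}
	return bean;
}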