Continuing with Hadoop today.
OutputFormat:
This is the step that runs just before data is written out to files; by customizing it you can direct the output to MySQL, plain files, and so on.
You can write to whatever storage target your requirements call for.
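To make "different storage targets" concrete, here is a minimal sketch of my own (not from the course material): in the driver, switching targets is mostly a matter of choosing an OutputFormat class. The JDBC driver class, URL, credentials, table name log_table, and column url below are all hypothetical placeholders, and the lines are assumed to sit in a driver's main after Job job = Job.getInstance(conf).
// Imports assumed:
//   org.apache.hadoop.mapreduce.lib.db.DBConfiguration
//   org.apache.hadoop.mapreduce.lib.db.DBOutputFormat
//   org.apache.hadoop.mapreduce.lib.output.TextOutputFormat

// Default: write plain text files.
job.setOutputFormatClass(TextOutputFormat.class);

// Alternative sketch: write rows into a MySQL table instead.
// JDBC driver, URL, credentials, table and column names are placeholders.
DBConfiguration.configureDB(job.getConfiguration(),
        "com.mysql.cj.jdbc.Driver",
        "jdbc:mysql://localhost:3306/testdb", "user", "password");
job.setOutputFormatClass(DBOutputFormat.class);
// Writes one column, "url"; the output key class must implement DBWritable.
DBOutputFormat.setOutput(job, "log_table", "url");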
******* Everything that enters a single reduce() call is the group of values sharing the same key. For example, if the input contains the same URL on several lines, all of those lines arrive at one reduce() call together.
I worked through an example that writes log lines out to separate files.
Mapper
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class LogMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // No processing: write each input line straight out as the key.
        context.write(value, NullWritable.get());
    }
}
Reducer
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class LogReduce extends Reducer<Text, NullWritable, Text, NullWritable> {
    // Everything entering one reduce() call shares the same key.
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // Write once per value so duplicate lines (identical URLs) are not lost.
        for (NullWritable value : values) {
            context.write(key, NullWritable.get());
        }
    }
}
Driver
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LogDriver {
    public static void main(String[] args) throws ClassNotFoundException, InterruptedException, IOException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(LogDriver.class);
        job.setMapperClass(LogMapper.class);
        job.setReducerClass(LogReduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // Register the custom OutputFormat.
        job.setOutputFormatClass(LogOutputFormat.class);
        FileInputFormat.setInputPaths(job, new Path("E:\\hadoop\\datatest\\outputformat"));
        // Even though we use a custom OutputFormat, it extends FileOutputFormat,
        // and FileOutputFormat still writes a _SUCCESS file, so an output
        // directory has to be specified here as well.
        FileOutputFormat.setOutputPath(job, new Path("E:\\hadoop\\datatest\\logoutput"));
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
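One thing worth noting: with this setup the output directory E:\hadoop\datatest\logoutput should end up holding little more than the _SUCCESS marker, because the actual records flow through the custom RecordWriter's own streams (baidu.log and other.log under E:\hadoop\datatest, as defined below).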
LogOutputFormat
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class LogOutputFormat extends FileOutputFormat<Text, NullWritable> {
    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job) throws IOException, InterruptedException {
        // Hand back our custom writer; the framework routes every output pair through it.
        LogRecordWriter lrw = new LogRecordWriter(job);
        return lrw;
    }
}
LogRecordWriter
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class LogRecordWriter extends RecordWriter<Text, NullWritable> {
    private FSDataOutputStream baidu;
    private FSDataOutputStream other;

    public LogRecordWriter(TaskAttemptContext job) {
        // Open the two output streams.
        try {
            FileSystem fs = FileSystem.get(job.getConfiguration());
            baidu = fs.create(new Path("E:\\hadoop\\datatest\\baidu.log"));
            other = fs.create(new Path("E:\\hadoop\\datatest\\other.log"));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void write(Text key, NullWritable nullWritable) throws IOException, InterruptedException {
        // The actual write: decide which stream gets the line based on
        // whether it contains "baidu".
        String log = key.toString();
        if (log.contains("baidu")) {
            baidu.writeBytes(log + "\n");
        } else {
            other.writeBytes(log + "\n");
        }
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        // Close both streams.
        IOUtils.closeStream(baidu);
        IOUtils.closeStream(other);
    }
}
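To make the routing concrete, a made-up illustration (the URLs are placeholders of my own, not from the course material): suppose the input file contains

http://www.baidu.com
http://www.google.com
http://www.baidu.com

Then baidu.log ends up with both baidu lines (the reducer's loop over values keeps the duplicate), while other.log receives the google line.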
Study time: 13:02 to 15:34