Sorting a one-dimensional array of numbers with MapReduce

The idea: the mapper parses each input line as an integer and emits it as an IntWritable key with an empty Text value. The MapReduce shuffle sorts keys on their way to the reduce phase, so the numbers come out in ascending order without any explicit sorting code.

The mapper (SortMapper.java):

import java.io.IOException;


import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;


public class SortMapper extends MapReduceBase implements Mapper<LongWritable, Text, IntWritable, Text> {

	@Override
	public void map(LongWritable key, Text value, OutputCollector<IntWritable, Text> output,
			Reporter reporter) throws IOException {
		// Each input line carries one integer; emit it as the key with an empty
		// value so the MapReduce shuffle sorts the numbers by key.
		String line = value.toString().trim();
		output.collect(new IntWritable(Integer.parseInt(line)), new Text());
	}

}
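
The driver below does not set a reducer class, so Hadoop falls back to the identity reducer, which already produces the sorted output. If you prefer an explicit reducer, a minimal sketch in the same old mapred API could look like the following (the class name SortReducer is my own; the driver would also need conf.setReducerClass(SortReducer.class)):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class SortReducer extends MapReduceBase implements Reducer<IntWritable, Text, IntWritable, Text> {

	@Override
	public void reduce(IntWritable key, Iterator<Text> values,
			OutputCollector<IntWritable, Text> output, Reporter reporter) throws IOException {
		// Keys arrive already sorted; emit each key once per occurrence
		// so that duplicate numbers are not collapsed.
		while (values.hasNext()) {
			values.next();
			output.collect(key, new Text());
		}
	}

}

The driver class (Sort.java) configures and submits the job: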
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;


public class Sort {

	/**
	 * Expects two arguments: the input path and the output path.
	 * @throws IOException if the job fails to run
	 */
	public static void main(String[] args) throws IOException {
		if (args.length != 2) {
			System.err.println("Usage: Sort <input path> <output path>");
			System.exit(-1);
		}
		
		JobConf conf = new JobConf(Sort.class);
		conf.setJobName("Sort Test");

		// Input and output paths come from the command line.
		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		FileOutputFormat.setOutputPath(conf, new Path(args[1]));

		conf.setMapperClass(SortMapper.class);
		// The mapper emits IntWritable keys and empty Text values.
		conf.setMapOutputKeyClass(IntWritable.class);
		conf.setMapOutputValueClass(Text.class);
		// No reducer class is set, so the identity reducer writes the keys out
		// in the order produced by the shuffle. With more than one reduce task
		// each part file is sorted on its own; conf.setNumReduceTasks(1) yields
		// a single, globally sorted output file.

		JobClient.runJob(conf);

	}

}
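
Assuming the two classes are compiled and packaged into a jar named sort.jar (the jar name and HDFS paths below are just placeholders), the job can be launched with:

hadoop jar sort.jar Sort /user/hadoop/sort/input /user/hadoop/sort/output

For an input file containing, say, 34, 7 and 19 on separate lines, the part file in the output directory holds the numbers in ascending order, one per line, each key followed by a tab and the empty value.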
Original post: https://www.cnblogs.com/dlutxm/p/2145887.html