MapReduce computations can be chained. In real business scenarios we sometimes need to compute an aggregate result first and then sort that result. When the value being sorted is a plain number it can be sorted directly; when the thing to be sorted is a JavaBean, the bean must first be made serializable.
【1】Serialization in Hadoop
Hadoop does not use Java's built-in serialization; it implements its own serialization mechanism.
Hadoop implements serialization through the Writable interface. Writable itself provides no comparison capability, so Hadoop combines it with Java's Comparable interface into a single interface, WritableComparable.
1 > Using Writable (when the data only needs to be persisted, not sorted)
A bean that needs to be serialized implements the Writable interface and overrides its two methods (serialization and deserialization):
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class DataBean implements Writable {
    private String tel;
    private long upPayLoad;
    private long downPayLoad;
    private long totalPayLoad;

    public DataBean() {}

    // a full-args constructor is usually added for convenience
    public DataBean(String tel, long upPayLoad, long downPayLoad) {
        super();
        this.tel = tel;
        this.upPayLoad = upPayLoad;
        this.downPayLoad = downPayLoad;
        this.totalPayLoad = upPayLoad + downPayLoad;
    }

    @Override
    public String toString() {
        return this.upPayLoad + "\t" + this.downPayLoad + "\t" + this.totalPayLoad;
    }

    // serialize: write the fields to the output stream
    public void write(DataOutput out) throws IOException {
        out.writeUTF(tel);
        out.writeLong(upPayLoad);
        out.writeLong(downPayLoad);
        out.writeLong(totalPayLoad);
    }

    // deserialize: read the fields back in the same order they were written
    public void readFields(DataInput in) throws IOException {
        this.tel = in.readUTF();
        this.upPayLoad = in.readLong();
        this.downPayLoad = in.readLong();
        this.totalPayLoad = in.readLong();
    }

    // getters and setters omitted
}
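For context, a Writable bean like this is typically used as a Mapper or Reducer value type. The sketch below is an illustration only and is not from the original post: the class name DataCountMapper and the input layout (tel, upload and download payloads separated by tabs) are assumptions made for the example.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical usage sketch: DataBean used as the map output value type.
public class DataCountMapper extends Mapper<LongWritable, Text, Text, DataBean> {
    private Text k = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String[] fields = value.toString().split("\t");
        String tel = fields[0];
        long up = Long.parseLong(fields[1]);
        long down = Long.parseLong(fields[2]);
        k.set(tel);
        // the framework serializes the bean with write() when buffering map output
        // and rebuilds it with readFields() before the reducer sees it
        context.write(k, new DataBean(tel, up, down));
    }
}

In the driver, such a value type would be registered with job.setMapOutputValueClass(DataBean.class), just as InfoBean is registered in the case study further down.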
2 > Using WritableComparable (when the data needs both persistence and sorting)
When a bean must both be persisted and be sorted by some order, it implements the WritableComparable interface and overrides three methods (serialization, deserialization, and comparison):
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {
    private String account;   // e-mail account, used as the key
    private double income;    // income
    private double expenses;  // expenses
    private double surplus;   // surplus (income - expenses)

    // set method, used in place of a parameterized constructor
    public void set(String account, double income, double expenses) {
        this.account = account;
        this.income = income;
        this.expenses = expenses;
        this.surplus = income - expenses;
    }

    // serialize
    public void write(DataOutput out) throws IOException {
        out.writeUTF(account);
        out.writeDouble(income);
        out.writeDouble(expenses);
        out.writeDouble(surplus);
    }

    // deserialize
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expenses = in.readDouble();
        this.surplus = in.readDouble();
    }

    // comparison method used for sorting
    @Override
    public int compareTo(InfoBean o) {
        // compare income first; when incomes are equal...
        if (this.income == o.getIncome()) {
            // ...compare expenses
            return this.expenses > o.getExpenses() ? 1 : -1;
        }
        return this.income > o.getIncome() ? 1 : -1;
    }

    @Override
    public String toString() {
        return income + "\t" + expenses + "\t" + surplus;
    }

    // getters and setters omitted
}
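A note on the compareTo design: it orders beans by income in ascending order and breaks ties by expenses, also ascending. It also never returns 0, so even two beans with identical income and expenses are kept as separate keys. In the sort job shown further down, where InfoBean is the map output key, this means every record still reaches the reducer as its own group instead of being merged with another account that happens to have the same totals.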
【Case study】Compute each user's total income and total expenses, then sort the results (when total incomes are equal, sort by total expenses).
【1】Source data
zhangsan@163.com 6000 0 2014-02-20
lisi@163.com 2000 0 2014-02-20
lisi@163.com 0 100 2014-02-20
zhangsan@163.com 3000 0 2014-02-20
wangwu@126.com 9000 0 2014-02-20
wangwu@126.com 0 200 2014-02-20
【2】Result data
lisi@163.com 2000.0 100.0 1900.0
zhangsan@163.com 9000.0 0.0 9000.0
wangwu@126.com 9000.0 200.0 8800.0
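As a quick check: zhangsan@163.com appears twice in the source data, so its total income is 6000 + 3000 = 9000 with 0 expenses and a surplus of 9000; lisi@163.com ends up with 2000 income, 100 expenses, 1900 surplus; wangwu@126.com with 9000 income, 200 expenses, 8800 surplus. The rows are then ordered by total income ascending (lisi first), and the tie between zhangsan and wangwu at 9000 is broken by total expenses ascending (zhangsan's 0 before wangwu's 200).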
【3】Approach: run a sum MR job that aggregates the data per account, then feed the sum job's output in as the source data of a sort MR job; the sort job's output is the final result.
【4】Code:
File 1: InfoBean.java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {
    private String account;   // e-mail account, used as the key
    private double income;    // income
    private double expenses;  // expenses
    private double surplus;   // surplus (income - expenses)

    // set method, used in place of a parameterized constructor
    public void set(String account, double income, double expenses) {
        this.account = account;
        this.income = income;
        this.expenses = expenses;
        this.surplus = income - expenses;
    }

    // serialize
    public void write(DataOutput out) throws IOException {
        out.writeUTF(account);
        out.writeDouble(income);
        out.writeDouble(expenses);
        out.writeDouble(surplus);
    }

    // deserialize
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expenses = in.readDouble();
        this.surplus = in.readDouble();
    }

    // comparison method used for sorting
    @Override
    public int compareTo(InfoBean o) {
        // compare income first; when incomes are equal...
        if (this.income == o.getIncome()) {
            // ...compare expenses
            return this.expenses > o.getExpenses() ? 1 : -1;
        }
        return this.income > o.getIncome() ? 1 : -1;
    }

    @Override
    public String toString() {
        return income + "\t" + expenses + "\t" + surplus;
    }

    // getters and setters omitted
}
File 2: the sum step, SumStep.java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumStep {

    public static class SumMapper extends Mapper<LongWritable, Text, Text, InfoBean> {
        private Text k = new Text();
        private InfoBean v = new InfoBean();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] fields = line.split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account);
            v.set(account, in, out);
            context.write(k, v);
            // the key and value objects are created once above and reused,
            // instead of calling new Text()/new InfoBean() for every record
        }
    }

    public static class SumReduce extends Reducer<Text, InfoBean, Text, InfoBean> {
        private InfoBean v = new InfoBean();

        @Override
        protected void reduce(Text key, Iterable<InfoBean> values, Context context)
                throws IOException, InterruptedException {
            double in_sum = 0;
            double out_sum = 0;
            for (InfoBean bean : values) {
                in_sum += bean.getIncome();
                out_sum += bean.getExpenses();
            }
            v.set("", in_sum, out_sum);
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(SumStep.class);

        job.setMapperClass(SumMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);
        FileInputFormat.setInputPaths(job, new Path("/mrDemo/input/sum_sort"));

        job.setReducerClass(SumReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path("/mrDemo/output/sum_sort"));

        job.waitForCompletion(true);
    }
}
// package as a jar and run: hadoop jar /root/Desktop/mr_JAR/sumAndSort.jar
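After this first job finishes, the intermediate file /mrDemo/output/sum_sort/part-r-00000 should contain one aggregated line per account, ordered by the Text key (the e-mail address) rather than by income, roughly:

lisi@163.com 2000.0 100.0 1900.0
wangwu@126.com 9000.0 200.0 8800.0
zhangsan@163.com 9000.0 0.0 9000.0

This is why a second job is needed to re-sort the rows by income and expenses.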
File 3: the sort step, SortStep.java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortStep {

    public static class SortMapper extends Mapper<LongWritable, Text, InfoBean, NullWritable> {
        private InfoBean k = new InfoBean();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] fields = line.split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            // the bean itself is the map output key, so the framework
            // sorts records by InfoBean.compareTo during the shuffle
            k.set(account, in, out);
            context.write(k, NullWritable.get());
        }
    }

    public static class SortReduce extends Reducer<InfoBean, NullWritable, Text, InfoBean> {
        private Text k = new Text();

        @Override
        protected void reduce(InfoBean bean, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            String account = bean.getAccount();
            k.set(account);
            context.write(k, bean);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(SortStep.class);

        job.setMapperClass(SortMapper.class);
        job.setMapOutputKeyClass(InfoBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        FileInputFormat.setInputPaths(job, new Path("/mrDemo/output/sum_sort/part-r-00000"));

        job.setReducerClass(SortReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path("/mrDemo/output/sumAndSort"));

        job.waitForCompletion(true);
    }
}
// package as a jar and run: hadoop jar /root/Desktop/mr_JAR/sumAndSort1.jar
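The two jars above are submitted one after the other by hand. The same chaining can also be done from a single driver that submits the sort job only after the sum job succeeds. The sketch below is an illustration only: the driver class name SumThenSortDriver is made up, and it simply reuses the SumStep/SortStep classes and paths from the listings above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumThenSortDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // step 1: the aggregation job, configured exactly like SumStep.main
        Job sumJob = Job.getInstance(conf, "sum");
        sumJob.setJarByClass(SumThenSortDriver.class);
        sumJob.setMapperClass(SumStep.SumMapper.class);
        sumJob.setReducerClass(SumStep.SumReduce.class);
        sumJob.setMapOutputKeyClass(Text.class);
        sumJob.setMapOutputValueClass(InfoBean.class);
        sumJob.setOutputKeyClass(Text.class);
        sumJob.setOutputValueClass(InfoBean.class);
        FileInputFormat.setInputPaths(sumJob, new Path("/mrDemo/input/sum_sort"));
        FileOutputFormat.setOutputPath(sumJob, new Path("/mrDemo/output/sum_sort"));

        // run the sort job only if the sum job completed successfully
        if (!sumJob.waitForCompletion(true)) {
            System.exit(1);
        }

        // step 2: the sort job, reading the sum job's output directory
        Job sortJob = Job.getInstance(conf, "sort");
        sortJob.setJarByClass(SumThenSortDriver.class);
        sortJob.setMapperClass(SortStep.SortMapper.class);
        sortJob.setReducerClass(SortStep.SortReduce.class);
        sortJob.setMapOutputKeyClass(InfoBean.class);
        sortJob.setMapOutputValueClass(NullWritable.class);
        sortJob.setOutputKeyClass(Text.class);
        sortJob.setOutputValueClass(InfoBean.class);
        FileInputFormat.setInputPaths(sortJob, new Path("/mrDemo/output/sum_sort"));
        FileOutputFormat.setOutputPath(sortJob, new Path("/mrDemo/output/sumAndSort"));

        System.exit(sortJob.waitForCompletion(true) ? 0 : 1);
    }
}

Hadoop also ships JobControl/ControlledJob for expressing such dependencies, but for two sequential steps the plain waitForCompletion chain above is usually enough.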
Note: MapReduce itself sorts the map output by key during the shuffle (a quicksort of the in-memory buffer followed by merge passes), so the key must be both serializable and comparable, i.e. it must implement WritableComparable. That is exactly why InfoBean is used as the map output key in SortStep.