A multi-table join is similar to a single-table join: both resemble a natural join in a database. Compared with the single-table case, the left and right tables and the join column are more clearly defined here, so the same processing scheme applies. In the map phase, identify which table each input line belongs to, split it, store the join-column value in the key, pack the other column together with a left/right-table tag into the value, and emit the pair. In the reduce phase, parse each value, separate the left-table and right-table records according to the tag, take the Cartesian product of the two sides, and emit the results directly.
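The core of the technique is easiest to see outside Hadoop. The sketch below simulates, in plain Java, what one reduce call does for a single addressID; the tagged strings use the same "tag,column1,column2" format that the mapper further down emits, and the rows themselves are hypothetical sample data, not the actual input files.

import java.util.ArrayList;
import java.util.List;

public class JoinSketch {
    public static void main(String[] args) {
        // Hypothetical tagged values for one addressID, as a reducer would see them.
        List<String> values = new ArrayList<String>();
        values.add("2,Beijing Red Star,1"); // factory row: factoryname, addressID
        values.add("2,Beijing Rising,1");   // factory row
        values.add("1,1,Beijing");          // address row: addressID, addressname

        List<String> fname = new ArrayList<String>();
        List<String> aname = new ArrayList<String>();
        for (String v : values) {
            String[] words = v.split(",");
            if (words[0].equals("1")) aname.add(words[2]);      // keep address name
            else if (words[0].equals("2")) fname.add(words[1]); // keep factory name
        }
        // The Cartesian product of the two sides is the join result for this key.
        for (String fn : fname)
            for (String an : aname)
                System.out.println(fn + "\t" + an);
    }
}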
The input is two files: one is the factory table, with a factory-name column and an address-ID column; the other is the address table, with an address-ID column and an address-name column.
Expected output: a header line ("factory" and "address") followed by one line per matching factory-name/address-name pair, i.e. the two tables joined on address ID.
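As an illustration (these rows are hypothetical, not the actual data files; columns are tab-separated and each file keeps its header line), inputs like

fname.txt:
factoryname         addressID
Beijing Red Star    1
Guangzhou Honda     2

aname.txt:
addressID    addressname
1            Beijing
2            Guangzhou

would produce:

factory             address
Beijing Red Star    Beijing
Guangzhou Honda     Guangzhou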
Complete code:
package mr;

import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MyAddress {

    // Mapper: decide which table a line came from, tag it, and key it by addressID
    // so that matching factory and address records meet in the same reduce call.
    static class MyAddressMapper extends Mapper<LongWritable, Text, Text, Text> {
        @Override
        public void map(LongWritable k1, Text v1, Context context)
                throws IOException, InterruptedException {
            String[] lines = v1.toString().split("\t");
            // Skip header lines and anything that is not a two-column record.
            if (lines.length < 2) return;
            if (lines[0].equals("factoryname") || lines[0].equals("addressID")) return;
            String word1 = lines[0];
            String word2 = lines[1];
            if (word1.charAt(0) >= '0' && word1.charAt(0) <= '9') {
                // First column is numeric: address table (addressID, addressname).
                context.write(new Text(word1), new Text("1" + "," + word1 + "," + word2));
            } else if (word2.charAt(0) >= '0' && word2.charAt(0) <= '9') {
                // Second column is numeric: factory table (factoryname, addressID).
                context.write(new Text(word2), new Text("2" + "," + word1 + "," + word2));
            } else {
                return;
            }
            System.out.println("map......" + word1 + "," + word2);
        }
    }
    // Reducer: all tagged records sharing one addressID arrive together.
    // Split them into the two sides by tag, then emit their Cartesian product.
    static class MyAddressReduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void setup(Context context)
                throws IOException, InterruptedException {
            // Header row; TextOutputFormat inserts the tab between key and value.
            context.write(new Text("factory"), new Text("address"));
        }

        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            List<String> fname = new ArrayList<String>();
            List<String> aname = new ArrayList<String>();
            Iterator<Text> it = values.iterator();
            while (it.hasNext()) {
                String[] words = it.next().toString().split(",");
                if (words[0].equals("1")) {
                    aname.add(words[2]);   // address record: keep the address name
                } else if (words[0].equals("2")) {
                    fname.add(words[1]);   // factory record: keep the factory name
                }
                // Records with an unrecognized tag are simply skipped, rather than
                // aborting the whole key and dropping its join output.
            }
            // Cartesian product: every factory name paired with every address name.
            for (String fn : fname) {
                for (String an : aname) {
                    context.write(new Text(fn), new Text(an));
                }
            }
            System.out.println("reduce......");
        }
    }
    private static String INPUT_PATH = "hdfs://master:9000/input/fname.txt";
    private static String INPUT_PATH2 = "hdfs://master:9000/input/aname.txt";
    private static String OUTPUT_PATH = "hdfs://master:9000/output/MyAddressResult/";

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Delete any previous output directory; otherwise the job refuses to start.
        FileSystem fs = FileSystem.get(new URI(OUTPUT_PATH), conf);
        if (fs.exists(new Path(OUTPUT_PATH)))
            fs.delete(new Path(OUTPUT_PATH), true);
        Job job = Job.getInstance(conf, "myjob");
        job.setJarByClass(MyAddress.class);
        job.setMapperClass(MyAddressMapper.class);
        job.setReducerClass(MyAddressReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // Both tables feed the same mapper, so simply add both files as input.
        FileInputFormat.addInputPath(job, new Path(INPUT_PATH));
        FileInputFormat.addInputPath(job, new Path(INPUT_PATH2));
        FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
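To try the job, one way is the following (the jar name myaddress.jar is an assumption, and the two input files must already exist at the HDFS paths hard-coded above):

hadoop jar myaddress.jar mr.MyAddress
hadoop fs -cat /output/MyAddressResult/part-r-00000

part-r-00000 is the default output file of a single reducer. With more than one reducer the result is spread over several part-r-* files, and the header line written in setup() appears once per reducer.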