为了识别图中度数最低的顶点，我们实现两个作业（Job），这在 Hadoop MapReduce 编程中称为链式作业（chained jobs）。
作业 1
作业 1 将计算每个顶点的度数。Reducer 会将输出保存到 HDFS 文件夹。
作业 2
作业 2 将读取作业 1 的输出作为输入，并找到度数最低的顶点。
请在下面找到用 JAVA 编写的详细代码。程序的参数是输入文本文件和输出文件
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * Finds the vertex with the lowest degree in a graph using two chained
 * MapReduce jobs:
 * <ol>
 *   <li>Job 1 counts the degree of every vertex (word-count over edge
 *       endpoints) and writes {@code vertex\tdegree} lines to a temporary
 *       HDFS directory.</li>
 *   <li>Job 2 reads that output and keeps the vertex with the minimum
 *       degree (first one seen wins on ties).</li>
 * </ol>
 * Usage: {@code LowestDegreeOfGraph <input path> <output path>} where the
 * input is a text file with one edge ("u v") per line.
 */
public class LowestDegreeOfGraph {

    /** Temporary HDFS directory that carries job 1's output into job 2. */
    private static final String INTERMEDIATE_DIR = "/opt/";

    /**
     * Job 1 mapper: emits (vertex, 1) for every endpoint token on an edge
     * line, so each edge contributes 1 to the degree of both endpoints.
     */
    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        // Reused across map() calls to avoid one allocation per token.
        private final Text word = new Text();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                context.write(word, one);
            }
        }
    }

    /**
     * Job 2 mapper: funnels every {@code vertex\tdegree} line from job 1's
     * output under a single key so one reducer sees all candidates and can
     * pick the global minimum.
     */
    public static class MapForFindingLowDegree extends Mapper<LongWritable, Text, Text, Text> {
        private static final Text SINGLE_KEY = new Text("vertexWithLowestDegree");

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] parts = value.toString().split("\t");
            // Defensively skip blank or malformed lines instead of throwing
            // ArrayIndexOutOfBoundsException.
            if (parts.length < 2) {
                return;
            }
            context.write(SINGLE_KEY, new Text(parts[0] + "," + parts[1]));
        }
    }

    /**
     * Job 1 reducer (also used as combiner — summing is associative and
     * commutative): sums the 1s emitted for each vertex into its degree.
     */
    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    /**
     * Job 2 reducer: scans all "vertex,degree" pairs and keeps the one with
     * the strictly smallest degree (the first pair seen wins on ties).
     */
    public static class ReduceForFindingLowDegree extends Reducer<Text, Text, Text, Text> {
        @Override
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            boolean seen = false;
            int vertex = 0;
            int degree = 0;
            for (Text value : values) {
                String[] pair = value.toString().split(",");
                int candidateVertex = Integer.parseInt(pair[0]);
                int candidateDegree = Integer.parseInt(pair[1]);
                if (!seen || candidateDegree < degree) {
                    vertex = candidateVertex;
                    degree = candidateDegree;
                    seen = true;
                }
            }
            context.write(new Text("Vertex with the lowest degree " + vertex),
                    new Text("Degree " + degree));
        }
    }

    /**
     * Runs the two chained jobs.
     *
     * @param args args[0] = input edge-list file, args[1] = final output dir
     * @throws Exception if job submission or HDFS access fails
     */
    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: LowestDegreeOfGraph <input path> <output path>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        // fs.defaultFS is the current name of the deprecated fs.default.name key.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");

        Path intermediate = new Path(INTERMEDIATE_DIR);
        FileSystem fs = FileSystem.get(conf);
        // A job fails on startup if its output directory already exists, so
        // remove leftovers from a previous run.
        if (fs.exists(intermediate)) {
            fs.delete(intermediate, true);
        }

        // Job.getInstance replaces the deprecated new Job(conf) constructor.
        Job job = Job.getInstance(conf, "For calculating the degree for every vertex");
        job.setJarByClass(LowestDegreeOfGraph.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, intermediate);
        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class); // safe: degree sum is associative
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Abort instead of letting job 2 read missing/stale data.
        if (!job.waitForCompletion(true)) {
            System.exit(1);
        }

        Job job2 = Job.getInstance(conf, "Identifying the Vertex with lowest degree");
        job2.setJarByClass(LowestDegreeOfGraph.class);
        FileInputFormat.addInputPath(job2, intermediate);
        FileOutputFormat.setOutputPath(job2, new Path(args[1]));
        job2.setMapperClass(MapForFindingLowDegree.class);
        job2.setReducerClass(ReduceForFindingLowDegree.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        boolean ok = job2.waitForCompletion(true);

        // Clean up the intermediate directory now that job 2 has consumed it.
        fs.delete(intermediate, true);
        System.exit(ok ? 0 : 1);
    }
}
args[0] 是输入文件。示例输入文件如下。
示例输入文件
1 2
1 3
2 3
1 4
args[1] 是输出文件的位置。
请在运行作业时提供 args[0] 和 args[1]。
/opt 是存储中间（临时）数据的位置。