Hadoop MapReduce: reduce method not called


I am writing a MapReduce program.

In my code, the reduce(Text key, Iterable<String> values, Context context) method is never called. The @Override annotation above it gives the error: Method does not override method from its superclass

Here is my code:

package WordCountP;

import java.io.FileReader;
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;


public class popularity extends Configured implements Tool{

    public class PopularityMapper extends Mapper<Text, Text, Text, Text> {

        @Override
        protected void map(Text key, Text value,
                           Context context)
                throws IOException, InterruptedException {

                JSONParser jsonParser = new JSONParser();
                try {
                    JSONObject jsonobject = (JSONObject) jsonParser.parse(new FileReader("src\\testinput.json"));
                    JSONArray jsonArray = (JSONArray) jsonobject.get("votes");

                    Iterator<JSONObject> iterator = jsonArray.iterator();
                    while(iterator.hasNext()) {
                        JSONObject obj = iterator.next();
                        String song_id_rave_id = (String) obj.get("song_ID") + "|" + (String) obj.get("rave_ID");
                        String preference = (String) obj.get("preference");
                        System.out.println(song_id_rave_id + "||" + preference);
                        context.write(new Text(song_id_rave_id), new Text(preference));
                    }
                }catch(ParseException e) {
                    e.printStackTrace();
                }
        }

    }

    public class PopularityReducer extends Reducer<Text, Iterable<String>, Text, Text> {

        @Override
        protected void reduce(Text key, Iterable<String> values, Context context)
                throws IOException, InterruptedException {

            int sum = 0;
            for ( String val: values){
                if (val == "true"){
                    sum +=1;
                }
                else if (val == "false"){
                    sum -=1;
                }

            }
            String result = Integer.toString(sum);
            context.write(new Text(key), new Text(result));
        }
    }



    public static void main(String[] args) throws Exception{
        int exitCode = ToolRunner.run(new popularity(), args);
        System.exit(exitCode);
    }



    public int run(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.printf("Usage: %s [generic options] <input> <output>\n",
                    getClass().getSimpleName());
            ToolRunner.printGenericCommandUsage(System.err);
            return -1;
        }

        Job job = new org.apache.hadoop.mapreduce.Job();
        job.setJarByClass(popularity.class);
        job.setJobName("PopularityCounter");

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setMapperClass(PopularityMapper.class);
        job.setReducerClass(PopularityReducer.class);

        int returnValue = job.waitForCompletion(true) ? 0:1;
        System.out.println("job.isSuccessful " + job.isSuccessful());
        return returnValue;
    }
}

I tried naming the method with a capital R (Reduce()), but that didn't work either. I think the arguments given to the method are wrong, but I can't spot any problem... Any ideas?

Second, is there any way to set the output format to a .txt file?

For reference, my input JSON is:

{"votes":[{
    "song_ID": "Piece of your heart",
    "mbr_ID": "001",
    "preference": "true",
    "timestamp": "11:22:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Piece of your heart",
    "mbr_ID": "002",
    "preference": "true",
    "timestamp": "11:22:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "001",
    "preference": "false",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "002",
    "preference": "false",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    },
    {
    "song_ID": "Atje voor de sfeer",
    "mbr_ID": "003",
    "preference": "true",
    "timestamp": "11:44:33",
    "rave_ID": "rave001",
    }]
}


Thanks!

Tags: java, hadoop, mapreduce, reduce
1 Answer

Your reduce method does not actually override the one inherited from Reducer, so the framework never calls it and instead runs the default identity reduce, which passes every value straight through.

The mismatch is in the generic parameters. The class is declared as Reducer<Text, Iterable<String>, Text, Text>, which binds the value type VALUEIN to Iterable<String>; the method inherited from Reducer is therefore reduce(Text key, Iterable<Iterable<String>> values, Context context). Your reduce(Text key, Iterable<String> values, Context context) is merely an overload of it, which is why the compiler rejects the @Override annotation.
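As a compile-time illustration (the class name SignatureDemo is invented here, not part of the question's code), this is the only signature the compiler accepts as an override under the question's generic parameters:

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Illustration only: with VALUEIN bound to Iterable<String>, the framework
// hands reduce() an Iterable of VALUEIN, i.e. a doubly nested Iterable.
class SignatureDemo extends Reducer<Text, Iterable<String>, Text, Text> {

    @Override  // accepted: parameter types match the inherited method
    protected void reduce(Text key, Iterable<Iterable<String>> values, Context context)
            throws IOException, InterruptedException {
        super.reduce(key, values, context);  // default behaviour: identity pass-through
    }
}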

The fix is to bind the value type to a Writable and match the inherited signature exactly: extend Reducer<Text, Text, Text, Text> and declare reduce(Text key, Iterable<Text> values, Context context). Hadoop keys and values must implement Writable, so Text is the natural replacement for String here, and it matches what your mapper already emits.
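A minimal sketch of the corrected reducer, assuming it stays nested inside the driver class as in the question (hence static, so Hadoop can instantiate it by reflection); the string comparison also uses equals() instead of ==, since == compares object references rather than contents:

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Nested mapper/reducer classes must be static so the framework can instantiate them.
public static class PopularityReducer extends Reducer<Text, Text, Text, Text> {

    @Override  // compiles now: the signature matches Reducer<Text, Text, Text, Text>
    protected void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {

        int sum = 0;  // +1 per "true" preference, -1 per "false"
        for (Text val : values) {
            if ("true".equals(val.toString())) {
                sum += 1;
            } else if ("false".equals(val.toString())) {
                sum -= 1;
            }
        }
        context.write(key, new Text(Integer.toString(sum)));
    }
}

With the generics matching, @Override compiles and Hadoop dispatches each distinct key with its grouped values to this method. Note that the driver would also need the output value class to agree with what the reducer writes, e.g. job.setOutputValueClass(Text.class), since the question's run() currently sets IntWritable while the reducer emits Text.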
