Handling garbled (mojibake) output in MapReduce mainly comes down to the encoding of the input data: Hadoop's Text type assumes UTF-8 by default.
If the source data is not UTF-8 encoded but, for example, GBK encoded, it has to be decoded as GBK inside the MapReduce job.
Core statement:
String str = new String(value.getBytes(), 0, value.getLength(), "GBK");
Explanation: value.getBytes() returns the raw byte buffer backing the Text value (the buffer may be longer than the actual record, which is why the 0 and value.getLength() bounds are passed); to turn those bytes into a String, the matching charset must be supplied, otherwise the characters come out garbled.
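As a quick standalone illustration (outside MapReduce; the class name GbkDecodeDemo and the sample string are made up for this sketch), decoding GBK bytes as UTF-8 garbles them, while decoding them as GBK recovers the text:

import java.nio.charset.StandardCharsets;

public class GbkDecodeDemo {
    public static void main(String[] args) throws Exception {
        // Simulate one GBK-encoded record as it would arrive from the input file.
        byte[] gbkBytes = "中文测试".getBytes("GBK");

        // Decoding with the wrong charset produces mojibake; decoding with "GBK" is correct.
        String wrong = new String(gbkBytes, StandardCharsets.UTF_8);
        String right = new String(gbkBytes, "GBK");

        System.out.println("decoded as UTF-8: " + wrong);
        System.out.println("decoded as GBK:   " + right);
    }
}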
Example program:
package org.two;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class CleanOneDriver {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(CleanOneDriver.class);

        job.setMapperClass(CleanOneMapper.class);
        job.setReducerClass(CleanOneReduce.class);

        // Map output types, and the job's final output types declared explicitly.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    private static class CleanOneMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Decode the raw bytes of the line as GBK instead of calling value.toString(),
            // which would interpret them as UTF-8 and produce garbled characters.
            String str = new String(value.getBytes(), 0, value.getLength(), "GBK");
            context.write(new Text(str), NullWritable.get());
        }
    }

    private static class CleanOneReduce extends Reducer<Text, NullWritable, Text, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            // Keys are already decoded; writing them back through Text re-encodes them as UTF-8.
            context.write(key, NullWritable.get());
        }
    }
}
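To run it, package the program into a jar and submit it with hadoop jar, passing the GBK input directory as args[0] and the output directory as args[1] (the jar name and paths are whatever fits your environment). Because the mapper wraps the decoded string in a new Text, the job's output files are written as UTF-8 and display correctly with the usual tools.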