Hadoop MapReduce on Eclipse: Cleaning up the staging area file:/app/hadoop/tmp/mapred/staging/myname183880112/.staging/job_local183880112_0001 -
2014-04-04 16:02:31.633 java[44631:1903] unable load realm info scdynamicstore 14/04/04 16:02:32 warn util.nativecodeloader: unable load native-hadoop library platform... using builtin-java classes applicable 14/04/04 16:02:32 warn mapred.jobclient: use genericoptionsparser parsing arguments. applications should implement tool same. 14/04/04 16:02:32 warn mapred.jobclient: no job jar file set. user classes may not found. see jobconf(class) or jobconf#setjar(string). 14/04/04 16:02:32 warn snappy.loadsnappy: snappy native library not loaded 14/04/04 16:02:32 info mapred.fileinputformat: total input paths process : 1 14/04/04 16:02:32 info mapred.jobclient: cleaning staging area file:/app/hadoop/tmp/mapred/staging/myname183880112/.staging/job_local183880112_0001 java.lang.nullpointerexception @ org.apache.hadoop.conf.configuration.getlocalpath(configuration.java:950) @ org.apache.hadoop.mapred.jobconf.getlocalpath(jobconf.java:476) @ org.apache.hadoop.mapred.localjobrunner$job.<init>(localjobrunner.java:121) @ org.apache.hadoop.mapred.localjobrunner.submitjob(localjobrunner.java:592) @ org.apache.hadoop.mapred.jobclient$2.run(jobclient.java:1013) @ org.apache.hadoop.mapred.jobclient$2.run(jobclient.java:936) @ java.security.accesscontroller.doprivileged(native method) @ javax.security.auth.subject.doas(subject.java:415) @ org.apache.hadoop.security.usergroupinformation.doas(usergroupinformation.java:1190) @ org.apache.hadoop.mapred.jobclient.submitjobinternal(jobclient.java:936) @ org.apache.hadoop.mapred.jobclient.submitjob(jobclient.java:910) @ org.apache.hadoop.mapred.jobclient.runjob(jobclient.java:1353) @ lineindex.main(lineindex.java:92)
i trying execute mapreduce program line index using mapreduce in eclipse. above error shows up. code is:
public class lineindex { public static class lineindexmapper extends mapreducebase implements mapper<longwritable, text, text, text> { private final static text word = new text(); private final static text location = new text(); public void map(longwritable key, text val, outputcollector<text, text> output, reporter reporter) throws ioexception { filesplit filesplit = (filesplit)reporter.getinputsplit(); string filename = filesplit.getpath().getname(); location.set(filename); string line = val.tostring(); stringtokenizer itr = new stringtokenizer(line.tolowercase()); while (itr.hasmoretokens()) { word.set(itr.nexttoken()); output.collect(word, location); } } } public static class lineindexreducer extends mapreducebase implements reducer<text, text, text, text> { public void reduce(text key, iterator<text> values, outputcollector<text, text> output, reporter reporter) throws ioexception { boolean first = true; stringbuilder toreturn = new stringbuilder(); while (values.hasnext()){ if (!first) toreturn.append(", "); first=false; toreturn.append(values.next().tostring()); } output.collect(key, new text(toreturn.tostring())); } } /** * actual main() method our program; * "driver" mapreduce job. */ public static void main(string[] args) { jobclient client = new jobclient(); jobconf conf = new jobconf(lineindex.class); conf.setjobname("lineindexer"); conf.setoutputkeyclass(text.class); conf.setoutputvalueclass(text.class); fileinputformat.addinputpath(conf, new path("input")); fileoutputformat.setoutputpath(conf, new path("output")); conf.setmapperclass(lineindexmapper.class); conf.setreducerclass(lineindexreducer.class); conf.addresource(new path("/usr/local/hadoop/etc/hadoop/core-site.xml")); conf.addresource(new path("/usr/local/hadoop/etc/hadoop/hdfs-site.xml")); client.setconf(conf); try { jobclient.runjob(conf); } catch (exception e) { e.printstacktrace(); } } }
i unable understand , resolve error nullpointerexception here. please me out??
can add mapred-site.xml file configuration class object , try once more. may need specify property mapred.local.dir
in xml file.
Comments
Post a Comment