Java Lab Booklet
Lab: Understanding Block Storage
The block size must be a multiple of 512 bytes (the checksum size).
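As an example of the constraint, a block size can be set per file at write time; 134217728 bytes (128 MB) is a valid multiple of 512 (file and path reuse this lab's examples):
hdfs dfs -D dfs.blocksize=134217728 -put stocks.csv /user/root/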
View the number of blocks in a file:
hdfs fsck /user/root/stocks.csv -files -blocks -locations
-files --- displays each file being checked.
-blocks --- displays the block IDs that make up each file.
-locations --- displays the IP addresses of the DataNodes holding each block.
Lab: Configuring a Hadoop Development Environment
hdfs dfsadmin -report --- verify the DataNodes in the cluster.
yarn node -list --- verify the NodeManagers in the cluster.
Lab: Putting Files in HDFS with Java
Configuration configuration = new Configuration();
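A minimal sketch of the complete put operation, assuming a local source file /root/data/counties.csv and an HDFS target /user/root/counties.csv (both paths are placeholders for this lab):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PutFile {
    public static void main(String[] args) throws Exception {
        // Picks up core-site.xml and hdfs-site.xml from the classpath.
        Configuration configuration = new Configuration();
        FileSystem fs = FileSystem.get(configuration);
        // Copy the local file into HDFS (paths are placeholders).
        fs.copyFromLocalFile(new Path("/root/data/counties.csv"),
                new Path("/user/root/counties.csv"));
        fs.close();
    }
}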
build.gradle:
project.ext.mainclass = 'hdfs.InputCounties'
project.ext.archiveName = 'inputcounties.jar'
apply from: '/root/java/labs/build.gradle'
yarn jar inputcounties.jar hdfs.InputCounties
Why are the words sorted alphabetically?
The words are the keys, and keys are sorted during the shuffle/sort phase before they reach the reducer. For example, the map output (dog,1), (cat,1), (dog,1) arrives at the reducer as (cat,[1]) followed by (dog,[1,1]).
package wordcount;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private static final IntWritable ONE = new IntWritable(1);
    private Text outputKey = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Split the line on spaces and emit (word, 1) for each word.
        String lineStr = value.toString();
        String[] words = StringUtils.split(lineStr, ' ');
        for (String word : words) {
            outputKey.set(word);
            context.write(outputKey, ONE);
        }
    }
}
package wordcount;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable outputValue = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the counts for this word and emit (word, total).
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        outputValue.set(sum);
        context.write(key, outputValue);
    }
}
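The lab does not list the driver class. A minimal sketch of one, assuming the class name wordcount.WordCountJob and that the input and output paths arrive as command-line arguments:

package wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountJob {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "WordCount");
        job.setJarByClass(WordCountJob.class);

        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // Mapper and reducer emit the same key/value types here,
        // so setting the output types once is enough.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Input and output paths come from the command line (assumption).
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

With the build.gradle conventions above it would run the same way as the earlier lab, e.g. yarn jar wordcount.jar wordcount.WordCountJob input output (jar name and paths are placeholders).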