Table of Contents
- 1. Start the Hadoop Services
- 2. Create the Text File
- 3. Upload the Text File
- 4. Display the File Contents
- 5. Complete the Sorting Task
- 6. Compute the Maximum and Average Profit
1. Start the Hadoop Services
On the master VM, run the command:
start-all.sh
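Optionally, running jps on the master can confirm that the expected daemons (NameNode, SecondaryNameNode, ResourceManager, and so on, depending on how the cluster is laid out) are up before continuing.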
2. Create the Text File
Create a local file students.txt on the master VM with the following content:
李晓文 女 20
张晓航 男 19
郑小刚 男 21
吴文华 女 18
肖云宇 男 22
陈燕文 女 19
李连杰 男 23
艾晓丽 女 21
童安格 男 18
- Use vim to create and edit students.txt
- Use the cat command to view and verify the file contents
3. Upload the Text File
Upload students.txt to the HDFS directory /student/input.
- Create the /student/input directory on HDFS by running:
hadoop fs -mkdir -p /student/input
- Verify in the Hadoop WebUI
- Upload the text file by running:
hadoop fs -put students.txt /student/input
- Verify in the Hadoop WebUI (a programmatic alternative is sketched right after this list)
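The same upload can also be done from Java through the HDFS FileSystem API. A minimal sketch, assuming the hdfs://master:9000 address and root user used by the read example later in this article; the UploadFile class name and the local path /root/students.txt are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class UploadFile {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve datanodes by hostname, as in the read example below
        conf.set("dfs.client.use.datanode.hostname", "true");
        String uri = "hdfs://master:9000";
        FileSystem fs = FileSystem.get(new URI(uri), conf, "root");
        // Local source path is a placeholder; adjust to where students.txt actually lives
        Path src = new Path("/root/students.txt");
        Path dst = new Path("/student/input");
        // Copy the local file into the HDFS directory
        fs.copyFromLocalFile(src, dst);
        fs.close();
    }
}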
4. Display the File Contents
Create a Maven project DisplayFile that reads /student/input/students.txt and prints its contents to the console.
Create the Maven project
Add the hadoop and junit dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.3.4</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>
</dependencies>
- Create a log4j.properties file in the resources directory:
log4j.rootLogger=stdout, logfile
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/hdfs.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
- Create the net.kox.hdfs package and a DisplayFile class inside it
- Write the program that implements the task:
package net.kox.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;

public class DisplayFile {
    @Test
    public void read1() throws Exception {
        // Create the configuration object
        Configuration conf = new Configuration();
        // Resolve datanodes by hostname
        conf.set("dfs.client.use.datanode.hostname", "true");
        // Define the URI (uniform resource identifier) of the cluster
        String uri = "hdfs://master:9000";
        // Create the file system object (an HDFS-backed FileSystem)
        FileSystem fs = FileSystem.get(new URI(uri), conf, "root");
        // Create the path object pointing at the file
        Path path = new Path(uri + "/student/input/students.txt");
        System.out.println(path);
        // Open an FSDataInputStream (data flows from the file into the program)
        FSDataInputStream in = fs.open(path);
        // Wrap it in a BufferedReader for efficient reading (byte stream -> character stream -> buffered stream)
        BufferedReader br = new BufferedReader(new InputStreamReader(in));
        // Variable holding the current line
        String nextLine = "";
        // Loop over the buffered character input stream
        while ((nextLine = br.readLine()) != null) {
            // Print the line to the console
            System.out.println(nextLine);
        }
        // Close the buffered character input stream
        br.close();
        // Close the FSDataInputStream
        in.close();
        // Close the file system
        fs.close();
    }
}
- Run the program and check the result
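If line-by-line processing is not needed, the file can also be streamed straight to the console with IOUtils.copyBytes, which the driver classes later in this article already use. A minimal sketch under the same assumptions (hdfs://master:9000, root user); the DisplayFile2 class and read2 method names are placeholders:

package net.kox.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

import java.net.URI;

public class DisplayFile2 {
    @Test
    public void read2() throws Exception {
        Configuration conf = new Configuration();
        conf.set("dfs.client.use.datanode.hostname", "true");
        String uri = "hdfs://master:9000";
        FileSystem fs = FileSystem.get(new URI(uri), conf, "root");
        // Open the file and copy its bytes straight to the console
        FSDataInputStream in = fs.open(new Path(uri + "/student/input/students.txt"));
        IOUtils.copyBytes(in, System.out, 4096, false);
        // Close the stream and the file system
        IOUtils.closeStream(in);
        fs.close();
    }
}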
5. Complete the Sorting Task
Create a Maven project SortByAge that uses the MapReduce framework to process /student/input/students.txt and write the records sorted by age in descending order.
- Create the Maven project SortByAge
- Add the hadoop and junit dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.3.4</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>
</dependencies>
- Create a log4j.properties file in the resources directory:
log4j.rootLogger=stdout, logfile
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/hdfs.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
- Create a Student class in the net.kox.mr package
- Write the code:
package net.kox.mr;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class Student implements WritableComparable<Student> {
    private String name;
    private String gender;
    private int age;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getGender() {
        return gender;
    }

    public void setGender(String gender) {
        this.gender = gender;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }

    @Override
    public String toString() {
        return "Student{" +
                "name='" + name + '\'' +
                ", gender='" + gender + '\'' +
                ", age=" + age +
                '}';
    }

    public int compareTo(Student o) {
        return o.getAge() - this.getAge(); // descending by age
    }

    public void write(DataOutput out) throws IOException {
        out.writeUTF(name);
        out.writeUTF(gender);
        out.writeInt(age);
    }

    public void readFields(DataInput in) throws IOException {
        name = in.readUTF();
        gender = in.readUTF();
        age = in.readInt();
    }
}
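Because Student is used as the map output key, the shuffle phase sorts records with this compareTo, which is what produces the descending-by-age output. A minimal local sketch (run inside the same project, no cluster needed) to illustrate the ordering; the StudentSortDemo class name is a placeholder and the sample values come from students.txt:

package net.kox.mr;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class StudentSortDemo {
    public static void main(String[] args) {
        List<Student> students = new ArrayList<>();
        students.add(make("李晓文", "女", 20));
        students.add(make("张晓航", "男", 19));
        students.add(make("李连杰", "男", 23));
        // Collections.sort uses Student.compareTo, so larger ages come first
        Collections.sort(students);
        for (Student s : students) {
            System.out.println(s);
        }
    }

    private static Student make(String name, String gender, int age) {
        Student s = new Student();
        s.setName(name);
        s.setGender(gender);
        s.setAge(age);
        return s;
    }
}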
- Create a StudentMapper class in net.kox.mr
- Write the program:
package net.kox.mr;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class StudentMapper extends Mapper<LongWritable, Text, Student, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Get the line content
        String line = value.toString();
        // Split on spaces to get the field array
        String[] fields = line.split(" ");
        // Extract the student information
        String name = fields[0];
        String gender = fields[1];
        int age = Integer.parseInt(fields[2]);
        // Create the student object
        Student student = new Student();
        // Set its properties
        student.setName(name);
        student.setGender(gender);
        student.setAge(age);
        // Emit the student as the key; the value carries no information
        context.write(student, NullWritable.get());
    }
}
- Create a StudentReducer class in the net.kox.mr package:
package net.kox.mr;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class StudentReducer extends Reducer<Student, NullWritable, Text, NullWritable> {
    @Override
    protected void reduce(Student key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        for (NullWritable value : values) {
            // Get the student object for the current record
            Student student = key;
            // Build the student info string
            String studentInfo = student.getName() + "\t" + student.getGender() + "\t" + student.getAge();
            context.write(new Text(studentInfo), NullWritable.get());
        }
    }
}
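Note that students with the same age (for example the two 19-year-olds in students.txt) compare as equal and therefore arrive in a single reduce call. The reducer still prints every student correctly because Hadoop reuses the key object and refreshes its fields as the values iterator advances, which is why the key is re-read and written once per value inside the loop rather than once per group.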
- Create a StudentDriver class in the net.kox.mr package:
package net.kox.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.net.URI;

public class StudentDriver {
    public static void main(String[] args) throws Exception {
        // Create the configuration object
        Configuration conf = new Configuration();
        // Resolve datanodes by hostname
        conf.set("dfs.client.use.datanode.hostname", "true");
        // Get a job instance
        Job job = Job.getInstance(conf);
        // Set the job's main class
        job.setJarByClass(StudentDriver.class);
        // Set the Mapper class
        job.setMapperClass(StudentMapper.class);
        // Set the map output key type
        job.setMapOutputKeyClass(Student.class);
        // Set the map output value type
        job.setMapOutputValueClass(NullWritable.class);
        // Set the Reducer class
        job.setReducerClass(StudentReducer.class);
        // Set the reduce output key type (the reducer emits Text keys)
        job.setOutputKeyClass(Text.class);
        // Set the reduce output value type
        job.setOutputValueClass(NullWritable.class);
        // Define the URI string
        String uri = "hdfs://master:9000";
        // Input directory
        Path inputPath = new Path(uri + "/student/input");
        // Output directory
        Path outputPath = new Path(uri + "/student/output");
        // Get the file system
        FileSystem fs = FileSystem.get(new URI(uri), conf);
        // Delete the output directory if it exists (second argument: recursive)
        fs.delete(outputPath, true);
        // Add the input directory to the job (multiple are allowed)
        FileInputFormat.addInputPath(job, inputPath);
        // Set the job's output directory (only one is allowed)
        FileOutputFormat.setOutputPath(job, outputPath);
        // Wait for the job to finish
        job.waitForCompletion(true);
        // Print the results
        System.out.println("====== Results ======");
        FileStatus[] fileStatuses = fs.listStatus(outputPath);
        for (int i = 1; i < fileStatuses.length; i++) {
            // Skip index 0 (the _SUCCESS marker) and print each result file path
            System.out.println(fileStatuses[i].getPath());
            // Open an input stream on the result file
            FSDataInputStream in = fs.open(fileStatuses[i].getPath());
            // Copy the result file contents to the console
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}
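The fs.delete(outputPath, true) call matters: MapReduce refuses to start if the output directory already exists, so deleting it up front lets the job be rerun without a manual cleanup step.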
- Run the program and check the result
6. Compute the Maximum and Average Profit
There is a file profit.txt with three months of profit figures:
1 10000
1 15000
1 20000
2 2340
2 5640
2 6140
3 15000
3 2380
3 8900
Create a Maven project MaxAvgProfit that uses the MapReduce framework to process profit.txt and output each month's maximum and average profit.
Prepare the data: create profit.txt locally and upload it to the /maxavgprofit/input directory on HDFS, the same way students.txt was uploaded earlier.
Create the Maven project MaxAvgProfit
Add the hadoop and junit dependencies to pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>3.3.4</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.13.2</version>
    </dependency>
</dependencies>
- Create a log4j.properties file in the resources directory:
log4j.rootLogger=stdout, logfile
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/hdfs.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
- Create a ScoreMapper class in net.kox.mr:
package net.kox.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class ScoreMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Get the line content
        String line = value.toString();
        // Split on spaces to get the field array
        String[] fields = line.split(" ");
        // The first field is the month
        String name = fields[0].trim();
        // The remaining fields are profit figures for that month
        for (int i = 1; i < fields.length; i++) {
            // Parse the profit value
            int score = Integer.parseInt(fields[i].trim());
            // Emit a (month, profit) key-value pair
            context.write(new Text(name), new IntWritable(score));
        }
    }
}
- Create a ScoreDriver class in the net.kox.mr package:
package net.kox.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.net.URI;

public class ScoreDriver {
    public static void main(String[] args) throws Exception {
        // Create the configuration object
        Configuration conf = new Configuration();
        // Resolve datanodes by hostname
        conf.set("dfs.client.use.datanode.hostname", "true");
        // Get a job instance
        Job job = Job.getInstance(conf);
        // Set the job's main class
        job.setJarByClass(ScoreDriver.class);
        // Set the Mapper class
        job.setMapperClass(ScoreMapper.class);
        // Set the map output key type
        job.setMapOutputKeyClass(Text.class);
        // Set the map output value type
        job.setMapOutputValueClass(IntWritable.class);
        // Set the Reducer class
        job.setReducerClass(ScoreReducer.class);
        // Set the reduce output key type
        job.setOutputKeyClass(Text.class);
        // Set the reduce output value type
        job.setOutputValueClass(NullWritable.class);
        // Define the URI string
        String uri = "hdfs://master:9000";
        // Input directory
        Path inputPath = new Path(uri + "/maxavgprofit/input");
        // Output directory
        Path outputPath = new Path(uri + "/maxavgprofit/output");
        // Get the file system
        FileSystem fs = FileSystem.get(new URI(uri), conf);
        // Delete the output directory if it exists (second argument: recursive)
        fs.delete(outputPath, true);
        // Add the input directory to the job (multiple are allowed)
        FileInputFormat.addInputPath(job, inputPath);
        // Set the job's output directory (only one is allowed)
        FileOutputFormat.setOutputPath(job, outputPath);
        // Wait for the job to finish
        job.waitForCompletion(true);
        // Print the results
        System.out.println("====== Results ======");
        FileStatus[] fileStatuses = fs.listStatus(outputPath);
        for (int i = 1; i < fileStatuses.length; i++) {
            // Skip index 0 (the _SUCCESS marker) and print each result file path
            System.out.println(fileStatuses[i].getPath());
            // Open an input stream on the result file
            FSDataInputStream in = fs.open(fileStatuses[i].getPath());
            // Copy the result file contents to the console
            IOUtils.copyBytes(in, System.out, 4096, false);
        }
    }
}
- Create a ScoreReducer class in the net.kox.mr package:
package net.kox.mr;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.text.DecimalFormat;

public class ScoreReducer extends Reducer<Text, IntWritable, Text, NullWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Declare variables
        int count = 0;               // number of profit records for the month
        int sum = 0;                 // total profit for the month
        double avg = 0;              // average profit for the month
        int max = Integer.MIN_VALUE; // maximum profit for the month
        // Iterate over the values to accumulate the total and track the maximum
        for (IntWritable value : values) {
            count++;                          // one more record
            sum += value.get();               // accumulate the total
            max = Math.max(max, value.get()); // keep the largest profit seen so far
        }
        // Compute the average profit (use floating-point division)
        avg = sum * 1.0 / count;
        // Create a decimal format object (one digit after the decimal point)
        DecimalFormat df = new DecimalFormat("#.#");
        // Build the per-month maximum/average profit string
        String scoreInfo = key + " maxProfit=" + max + ", avgProfit=" + df.format(avg);
        // Write the key-value pair
        context.write(new Text(scoreInfo), NullWritable.get());
    }
}
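As a sanity check independent of the cluster, the same per-month maximum and average can be computed locally from profit.txt with plain Java. A minimal sketch; the MaxAvgCheck class name and the local file path are placeholders:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class MaxAvgCheck {
    public static void main(String[] args) throws IOException {
        // Read profit.txt from a local path (placeholder; adjust as needed)
        List<String> lines = Files.readAllLines(Paths.get("profit.txt"));
        // month -> list of profit values for that month
        Map<String, List<Integer>> profits = new TreeMap<>();
        for (String line : lines) {
            String[] fields = line.trim().split(" ");
            profits.computeIfAbsent(fields[0], k -> new ArrayList<>()).add(Integer.parseInt(fields[1]));
        }
        // Print each month's maximum and average profit
        for (Map.Entry<String, List<Integer>> e : profits.entrySet()) {
            int max = e.getValue().stream().mapToInt(Integer::intValue).max().getAsInt();
            double avg = e.getValue().stream().mapToInt(Integer::intValue).average().getAsDouble();
            System.out.println(e.getKey() + " maxProfit=" + max + ", avgProfit=" + avg);
        }
    }
}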
- Run the program and check the result