开发环境准备
1、JDK
2、Maven
3、IDEA
使用Flink开发一个批处理应用程序
以最简单的 word count 为案例
准备一个文本,路径为 src/main/test_files/test_file
hello,welcome
hello,world,welcome
开发流程
- set up the batch execution environment
- read
- transform operations 开发的核心所在:开发业务逻辑
- execute program
Java实现
官方网址
第一种创建项目的方式
mvn archetype:generate \
-DarchetypeGroupId=org.apache.flink \
-DarchetypeArtifactId=flink-quickstart-java \
-DarchetypeVersion=1.9.0
第二种创建项目的方式
其实是一个shell脚本
$ curl https://flink.apache.org/q/quickstart.sh | bash -s 1.9.0
使用maven创建的时候会出现一个问题
[INFO] Generating project in Interactive mode
命令行会停留在这一行很久,如何解决?在mvn后加一个参数
mvn archetype:generate \
-DarchetypeGroupId=org.apache.flink \
-DarchetypeArtifactId=flink-quickstart-java \
-DarchetypeVersion=1.9.0 \
-DarchetypeCatalog=local
之后按照命令行交互提示,依次输入自定义的 groupId、artifactId、version、package 等项目信息
成功
java代码
package com.kun.flink.java.chapter02;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;
/**
 * Batch word-count application built with the Flink Java API.
 *
 * Reads a comma-separated text file, splits every line into words,
 * assigns each word a count of 1, then groups by word and sums the counts.
 */
public class BatchWCJavaAPP {
    public static void main(String[] args) throws Exception {
        final String input = "src/main/test_files/test_file";

        // step1: obtain the batch execution environment
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // step2: read the source file
        final DataSource<String> text = env.readTextFile(input);

        // step3: transform — split each line on "," and emit (word, 1) pairs
        text.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                for (String word : line.toLowerCase().split(",")) {
                    // skip empty tokens produced by consecutive separators
                    if (!word.isEmpty()) {
                        out.collect(new Tuple2<String, Integer>(word, 1));
                    }
                }
            }
        })
        // group by the word (tuple field 0) and sum the counts (field 1)
        .groupBy(0)
        .sum(1)
        .print();
    }
}
运行结果
19:29:13,205 INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Stopped Akka RPC service.
(world,1)
(hello,2)
(welcome,2)
Scala实现
官方网址
有两种构建项目的方式 SBT和Maven
这里只测试maven
$ mvn archetype:generate \
-DarchetypeGroupId=org.apache.flink \
-DarchetypeArtifactId=flink-quickstart-scala \
-DarchetypeVersion=1.9.0 \
-DarchetypeCatalog=local
步骤和上面java创建工程一样
scala代码
package com.kun.flink.scala.chapter02
import org.apache.flink.api.scala._
/**
 * Batch word-count application built with the Flink Scala API.
 *
 * Reads a comma-separated text file, splits every line into words,
 * assigns each word a count of 1, then groups by word and sums the counts.
 */
object BatchWCScalaAPP {

  def main(args: Array[String]): Unit = {
    // Fixed: the path must match the prepared file at src/main/test_files/test_file
    // (the Java version and the tutorial text both use this location; the old
    // value "src/test_files/test_file" pointed at a non-existent file).
    val input = "src/main/test_files/test_file"

    // step1: obtain the batch execution environment
    val env = ExecutionEnvironment.getExecutionEnvironment

    // step2: read the source file
    val text = env.readTextFile(input)

    // step3: split on ",", drop empty tokens, emit (word, 1), then group and sum
    text.flatMap(_.toLowerCase.split(","))
      .filter(_.nonEmpty)
      .map((_, 1))
      .groupBy(0)
      .sum(1)
      .print()
  }
}
结果
19:53:00,164 INFO org.apache.flink.runtime.rpc.akka.AkkaRpcService - Stopped Akka RPC service.
(world,1)
(hello,2)
(welcome,2)
使用Flink开发一个流处理应用程序
测试流处理的时候先打开 nc 监听对应端口作为 socket 数据源,例如:nc -lk 9999
java
package com.kun.flink.java.chapter02;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
/**
 * Streaming word-count application built with the Flink Java API.
 *
 * Consumes comma-separated lines from a socket, counts words in
 * 5-second tumbling windows, and prints the running results.
 * Start a socket server first (e.g. with nc) before running.
 */
public class StreamingWCJavaAPP {
    public static void main(String[] args) throws Exception {
        // step1: obtain the streaming execution environment
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // step2: read lines from the socket source
        final DataStreamSource<String> text = env.socketTextStream("hadoop", 9999);

        // step3: transform — split each line on "," and emit (word, 1) pairs
        text.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                for (String word : line.toLowerCase().split(",")) {
                    // skip empty tokens produced by consecutive separators
                    if (!word.isEmpty()) {
                        out.collect(new Tuple2<String, Integer>(word, 1));
                    }
                }
            }
        })
        // key by the word, aggregate counts in 5-second tumbling windows
        .keyBy(0)
        .timeWindow(Time.seconds(5))
        .sum(1)
        .print();

        // a streaming job only starts running once execute() is called
        env.execute("StreamingWCJavaAPP");
    }
}
结果
2> (b,2)
6> (a,3)
4> (c,4)
重构程序手动传入参数
package com.kun.flink.java.chapter02;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
/**
 * Streaming word-count application that reads its socket port from the
 * command line (e.g. {@code --port 9999}) via Flink's ParameterTool.
 *
 * Consumes comma-separated lines from a socket, counts words in
 * 5-second tumbling windows, and prints the running results.
 */
public class StreamingWCJava02APP {
    public static void main(String[] args) throws Exception {
        // Parse the --port argument; fall back to 9998 when it is missing
        // or malformed (getInt throws when the key is absent).
        // Note: no "= 0" initializer needed — both branches assign port.
        int port;
        try {
            final ParameterTool tool = ParameterTool.fromArgs(args);
            port = tool.getInt("port");
        } catch (Exception e) {
            System.err.println("端口未设置");
            port = 9998;
        }

        // step1: obtain the streaming execution environment
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // step2: read lines from the socket source on the resolved port
        final DataStreamSource<String> text = env.socketTextStream("hadoop", port);

        // step3: transform — split each line on "," and emit (word, 1) pairs
        text.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                for (String word : line.toLowerCase().split(",")) {
                    // skip empty tokens produced by consecutive separators
                    if (!word.isEmpty()) {
                        out.collect(new Tuple2<String, Integer>(word, 1));
                    }
                }
            }
        })
        // key by the word, aggregate counts in 5-second tumbling windows
        .keyBy(0)
        .timeWindow(Time.seconds(5))
        .sum(1)
        .print();

        // Fixed: the job name previously said "StreamingWCJavaAPP" — a
        // copy-paste leftover from the non-parameterized version; it now
        // matches this class so the job is identifiable in the Flink UI.
        env.execute("StreamingWCJava02APP");
    }
}
运行方法:通过命令行参数指定端口,例如 --port 9999;未指定或解析失败时默认使用 9998(运行结果略)
scala
package com.kun.flink.scala.chapter02
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
/**
 * Streaming word-count application built with the Flink Scala API.
 *
 * Consumes comma-separated lines from a socket, counts words in
 * 5-second tumbling windows, and prints the running results.
 * Start a socket server first (e.g. with nc) before running.
 */
object StreamingWCScalaAPP {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // read comma-separated lines from the socket source
    val text = env.socketTextStream("hadoop", 9999)

    // brings implicit TypeInformation for the tuple types into scope
    import org.apache.flink.streaming.api.scala._

    // Fixed for consistency with every other implementation in this note:
    // lower-case the input and drop empty tokens before counting, so
    // "A,,a" yields (a,2) here just as it does in the Java versions.
    text.flatMap(_.toLowerCase.split(","))
      .filter(_.nonEmpty)
      .map((_, 1))
      .keyBy(0)
      .timeWindow(Time.seconds(5))
      .sum(1)
      .print()

    env.execute("StreamingWCScalaAPP")
  }
}
运行结果
4> (c,1)
6> (a,2)
2> (b,3)