Environment
getExecutionEnvironment:创建一个执行环境,表示当前执行程序的上下文。 如果程序是独立调用的,则此方法返回本地执行环境;如果从命令行客户端调用程序以提交到集群,则此方法返回此集群的执行环境,也就是说,getExecutionEnvironment会根据查询运行的方式决定返回什么样的运行环境,是最常用的一种创建执行环境的方式。如果没有设置并行度,会以flink-conf.yaml中的配置为准,默认是1。
// 批处理
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
// 流处理
val env = StreamExecutionEnvironment.getExecutionEnvironment
createLocalEnvironment:返回本地执行环境,需要在调用时指定默认的并行度
val env = StreamExecutionEnvironment.createLocalEnvironment(1)
createRemoteEnvironment:返回集群执行环境,将Jar提交到远程服务器。需要在调用时指定JobManager的IP和端口号,并指定要在集群中运行的Jar包。
val env = ExecutionEnvironment.createRemoteEnvironment("jobmanager-hostname", 6123, "YOURPATH/wordcount.jar")
Source之从集合中读取数据
SensorReading.scala
// Case class modeling one sensor reading: sensor id, event timestamp, temperature.
case class SensorReading(id: String, timestamp: Long, temperature: Double)
SourceForCollection.scala
// 隐式转换很重要
import org.apache.flink.streaming.api.scala._
/**
 * Builds a bounded DataStream from an in-memory collection of sensor readings.
 */
object SourceForCollection {
  def main(args: Array[String]): Unit = {
    // Set up the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Sample readings to emit, in declaration order.
    val readings = List(
      SensorReading("sensor_1", 1547718199, 35.8),
      SensorReading("sensor_6", 1547718201, 15.4),
      SensorReading("sensor_7", 1547718202, 6.7),
      SensorReading("sensor_10", 1547718205, 38.1)
    )

    // Turn the collection into a DataStream.
    val listDstream: DataStream[SensorReading] = env.fromCollection(readings)

    // Print with a single sink task so output order matches the list.
    listDstream.print("stream for list").setParallelism(1)

    // Submit and run the job.
    env.execute("source test job")
  }
}
Source之从文件中读取数据
SensorReading.txt
sensor_1,1547718199,35.8
sensor_6,1547718201,15.4
sensor_7,1547718202,6.7
sensor_10,1547718205,38.1
SourceForFile.scala
import org.apache.flink.streaming.api.scala._
/**
 * Streams sensor records line by line from a local text file.
 */
object SourceForFile {
  def main(args: Array[String]): Unit = {
    // Streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Absolute (Windows-style) path of the input file.
    val inputPath = "D:\\MyWork\\WorkSpaceIDEA\\flink-tutorial\\src\\main\\resources\\SensorReading.txt"

    // Each stream element is one raw line of the file.
    val fileDstream: DataStream[String] = env.readTextFile(inputPath)
    fileDstream.print("source for file")

    // Submit and run the job.
    env.execute("source test job")
  }
}
Source之从Kafka消息队列的数据作为来源
pom.xml
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>flink-connector-kafka-0.11_2.11</artifactId>
<version>1.10.0</version>
</dependency>
SourceForKafka.scala
import java.util.Properties
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
/**
 * Consumes the "sensor" Kafka topic as an unbounded streaming source.
 */
object SourceForKafka {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Kafka consumer settings, collected once and copied into a Properties object.
    val kafkaConfig = Map(
      "bootstrap.servers" -> "hadoop102:9092",
      "group.id" -> "consumer-group",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "auto.offset.reset" -> "latest"
    )
    val properties = new Properties()
    kafkaConfig.foreach { case (key, value) => properties.setProperty(key, value) }

    // Attach the Kafka consumer; each record value is deserialized as a plain String.
    val kafkaDstream: DataStream[String] =
      env.addSource(new FlinkKafkaConsumer011[String]("sensor", new SimpleStringSchema(), properties))
    kafkaDstream.print("source for kafka")

    env.execute("source test job")
  }
}
开启kafka生产者
// kafka数据生产者
./bin/kafka-console-producer.sh --broker-list hadoop102:9092 --topic sensor
自定义Source
SourceForCustom.scala
import com.atguigu.bean.SensorReading
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.scala._
import scala.collection.immutable
import scala.util.Random
/**
 * Demonstrates attaching a user-defined SourceFunction to the stream environment.
 */
object SourceForCustom {
  def main(args: Array[String]): Unit = {
    // Streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Wire up the custom generator source.
    val customDstream: DataStream[SensorReading] = env.addSource(MySensorSource())

    // Print with four parallel sink tasks.
    customDstream.print("source for custom").setParallelism(4)

    env.execute("source test job")
  }
}
// Custom SourceFunction that generates random test sensor data.
// Kept as a case class so existing MySensorSource() call sites keep working,
// even though a plain class would be more idiomatic for a stateful source.
case class MySensorSource() extends SourceFunction[SensorReading]{
  // Cancellation flag. Flink invokes cancel() from a different thread than the
  // one running run(), so the flag must be @volatile — otherwise the write in
  // cancel() may never become visible to the emit loop and the source won't stop.
  @volatile var running: Boolean = true

  // Requests a clean shutdown of the emit loop in run().
  override def cancel(): Unit = {
    running = false
  }

  // Continuously emits randomized temperature readings for 10 sensors.
  override def run(sourceContext: SourceFunction.SourceContext[SensorReading]): Unit = {
    // Random generator shared by the whole emit loop.
    val random = new Random()

    // Initial temperatures: Gaussian around 60 (stddev 20), keyed by sensor id,
    // stored as (id, temperature) pairs.
    var currentTemps: immutable.IndexedSeq[(String, Double)] = 1.to(10).map(
      i => ("sensor_" + i, 60 + random.nextGaussian() * 20)
    )

    // Emit batches until cancel() flips the flag.
    while (running) {
      // Random-walk each temperature with a small Gaussian perturbation.
      currentTemps = currentTemps.map(
        data => (data._1, data._2 + random.nextGaussian())
      )
      // Stamp every reading in this round with the same wall-clock time (ms).
      val timestamp: Long = System.currentTimeMillis()
      currentTemps.foreach(
        data => sourceContext.collect( SensorReading(data._1, timestamp, data._2))
      )
      // Throttle: one batch of 10 readings every 200 ms.
      Thread.sleep(200)
    }
  }
}