package flinkdemo.flinksql.sqlJoin;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.BatchTableEnvironment;
import org.apache.flink.table.functions.ScalarFunction;
import org.apache.flink.types.Row;
import java.util.ArrayList;
import java.util.List;
/**
* @author zhangkai
* @create 2019/12/23
*/
/**
 * Demonstrates registering and using a scalar UDF with Flink's batch Table API.
 *
 * <p>Steps shown:
 * <ol>
 *   <li>extend {@link ScalarFunction} and implement an {@code eval} method,</li>
 *   <li>register the UDF with the table environment,</li>
 *   <li>call the function from a SQL query.</li>
 * </ol>
 */
public class UdfSql {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env);

        // Register the UDF under the name used in the SQL query below.
        tableEnv.registerFunction("stringConcat", new StringConcat("study_", "_good"));

        // Build a small word list; duplicates exercise the GROUP BY aggregation.
        String[] strArr = "hadoop,flink,hadoop,spark,kafka,hadoop,flink".split(",");
        List<WordCount> list = new ArrayList<>(strArr.length);
        for (String ele : strArr) {
            list.add(new WordCount(ele, 1));
        }

        DataSet<WordCount> dataSource = env.fromCollection(list);
        // Expose the DataSet as table "wordCCount" with columns (word, frequency).
        tableEnv.registerDataSet("wordCCount", dataSource, "word,frequency");

        // Apply the UDF to each grouped word and sum its occurrences.
        Table table = tableEnv.sqlQuery("select stringConcat(word)as word ,sum(frequency) as frequency from wordCCount group by word");
        DataSet<Row> rowDataSet = tableEnv.toDataSet(table, Row.class);
        rowDataSet.print();
    }

    /**
     * Scalar UDF that wraps its input with a configurable prefix and suffix.
     *
     * <p>Fields are non-final and a no-arg constructor is provided because Flink
     * may serialize/instantiate the function during distribution.
     */
    public static class StringConcat extends ScalarFunction {
        String pre;
        String last;

        public StringConcat() {
        }

        public StringConcat(String pre, String last) {
            this.pre = pre;
            this.last = last;
        }

        /**
         * Called by Flink for each row.
         *
         * @param value the input column value
         * @return {@code pre + value + last}
         */
        public String eval(String value) {
            return pre + value + last;
        }
    }

    /**
     * POJO holding a word and its occurrence count. Public fields and the
     * no-arg constructor satisfy Flink's POJO type requirements.
     */
    public static class WordCount {
        public String word;
        public long frequency;

        public WordCount() {
        }

        public WordCount(String word, long frequency) {
            this.word = word;
            this.frequency = frequency;
        }

        @Override
        public String toString() {
            return "WordCount{" +
                    "word='" + word + '\'' +
                    ", frequency=" + frequency +
                    '}';
        }
    }
}