一. 準備工作
-- 啓動yarn-session
/home/flink-1.15.2/bin/yarn-session.sh -d
-- 在yarn session模式下啓動flink sql
/home/flink-1.15.2/bin/sql-client.sh embedded -s yarn-session
二. 插入數據
代碼:
-- sets up the result mode to tableau to show the results directly in the CLI
-- NOTE: since Flink 1.14 the option key is 'sql-client.execution.result-mode';
-- the bare 'execution.result-mode' key is not recognized by the 1.15 SQL client,
-- and keys/values should be quoted in SET statements.
SET 'sql-client.execution.result-mode' = 'tableau';
-- Hudi sink/source table backed by HDFS; written to by the INSERT below
-- and read back by the snapshot query.
CREATE TABLE t2(
uuid VARCHAR(20) PRIMARY KEY NOT ENFORCED, -- Hudi record key: upserts deduplicate on this column
name VARCHAR(10),
age INT,
ts TIMESTAMP(3), -- event timestamp column
`partition` VARCHAR(20) -- backtick-quoted: "partition" is a reserved word
)
PARTITIONED BY (`partition`)
WITH (
'connector' = 'hudi',
'path' = 'hdfs://hp5:8020/user/hudi_data/t2', -- base path of the Hudi table on HDFS
'table.type' = 'MERGE_ON_READ' -- this creates a MERGE_ON_READ table, by default is COPY_ON_WRITE
);
-- insert data using values
-- Explicit column list (instead of bare INSERT INTO t2 VALUES ...) keeps the
-- statement valid and unambiguous if the table schema evolves.
INSERT INTO t2 (uuid, name, age, ts, `partition`) VALUES
('id1','Danny',23,TIMESTAMP '1970-01-01 00:00:01','par1'),
('id2','Stephen',33,TIMESTAMP '1970-01-01 00:00:02','par1'),
('id3','Julian',53,TIMESTAMP '1970-01-01 00:00:03','par2'),
('id4','Fabian',31,TIMESTAMP '1970-01-01 00:00:04','par2'),
('id5','Sophia',18,TIMESTAMP '1970-01-01 00:00:05','par3'),
('id6','Emma',20,TIMESTAMP '1970-01-01 00:00:06','par3'),
('id7','Bob',44,TIMESTAMP '1970-01-01 00:00:07','par4'),
('id8','Han',56,TIMESTAMP '1970-01-01 00:00:08','par4');
測試記錄:
三. 查詢數據
select * from t2;
四. 更新數據
更新數據和insert數據類似
-- this would update the record with key 'id1'
-- NOTE: the table created and populated above is t2, not t1 — writing to t1
-- here would fail (t1 is only defined later as a streaming-read table).
-- Hudi upserts on the primary key, so this row replaces the existing 'id1'.
insert into t2 values
('id1','Danny',27,TIMESTAMP '1970-01-01 00:00:01','par1');
注意,現在保存模式是追加。通常,總是使用追加模式,除非您試圖第一次創建表。再次查詢數據將顯示更新後的記錄。每個寫操作都會生成一個由時間戳表示的新提交。查詢相同 _hoodie_record_key 的記錄,可以觀察到其 _hoodie_commit_time 與 age 字段相對於之前提交的變化。
測試記錄:
五. 流查詢
Hudi Flink還提供了獲取自給定提交時間戳以來更改的記錄流的功能。這可以通過使用Hudi的流查詢和提供需要流化更改的起始時間來實現。如果我們希望在給定的提交之後進行所有更改(通常是這樣),則不需要指定endTime。
-- Streaming-read table over an existing Hudi table: continuously emits
-- records committed after 'read.start-commit'.
CREATE TABLE t1(
uuid VARCHAR(20) PRIMARY KEY NOT ENFORCED, -- Hudi record key
name VARCHAR(10),
age INT,
ts TIMESTAMP(3),
`partition` VARCHAR(20) -- backtick-quoted: "partition" is a reserved word
)
PARTITIONED BY (`partition`)
WITH (
'connector' = 'hudi',
'path' = '${path}', -- placeholder: replace with the actual table base path, e.g. hdfs://hp5:8020/user/hudi_data/t2
'table.type' = 'MERGE_ON_READ',
'read.streaming.enabled' = 'true', -- this option enable the streaming read
'read.start-commit' = '20210316134557', -- specifies the start commit instant time
'read.streaming.check-interval' = '4' -- specifies the check interval for finding new source commits, default 60s.
);
-- Then query the table in stream mode
-- (the query keeps running and prints new rows as new commits arrive)
select * from t1;
這將給出自 read.start-commit 指定的提交之後發生的所有變更。該特性的獨特之處在於,它允許您在流式或批處理數據源之上編寫流式管道。
六. 刪除數據
在流查詢中使用數據時,Hudi Flink源還可以接受來自底層數據源的更改日誌,然後按行級應用UPDATE和DELETE。然後,您可以在Hudi上爲各種RDBMS同步一個NEAR-REAL-TIME快照。