备注:由于不是专业的大数据开发人员,所以,难免有错误或不专业的地方,此处,记录一下是为了以后开发方便。
一、创建表
- 注意:表名和字段名起的要有意义。
- 获取当前hive表的创建语句:show create table base_xxxx_d_material_hive;
- 搜索表名含有某个关键词的hive表: hive> show tables '*material*';
- 查看hive表的表结构:desc base_xxxx_d_material_hive ;
非分区表-demo:
-- Demo: non-partitioned table, stored as Parquet.
CREATE TABLE `base_xxx_d_material_hive` (
    `qipu_id`                bigint        COMMENT '描述信息xxx',
    `first_source`           string        COMMENT '描述信息xxx',
    `cut_from`               int           COMMENT '描述信息xxxx',
    `cut_type`               double        COMMENT '描述信息xxxx',
    `reject_recommend`       boolean       COMMENT '描述信息xxxx',
    `has_original_site_logo` boolean       COMMENT '描述信息xxxx',
    `v_cate_type`            int           COMMENT '1-长 2-短 3-小',
    `user_tag_list`          array<string> COMMENT '用户标签结构化的数据,数组类型'
)
STORED AS PARQUET;
分区表-demo:
-- Demo: partitioned table (partition column `dt`), stored as Parquet.
CREATE TABLE `test_base_xx_d_album_audit_hive` (
    `id`               bigint        COMMENT '主键ID,自动递增',
    `title`            string        COMMENT '标题',
    `uid`              bigint        COMMENT '上传者的uid',
    `xxxx_name`        string        COMMENT '用户品类,多个用英文逗号分隔',
    `page_url`         string        COMMENT '链接地址',
    `create_time`      string        COMMENT '该条记录的创建时间',
    `create_user`      string        COMMENT '该条记录的创建人',
    `update_time`      string        COMMENT '该条记录的最后一次修改时间',
    `update_user`      string        COMMENT '该条记录的最后一次修改者',
    `version`          bigint        COMMENT '暂无意义',
    `pending_time`     string        COMMENT '提审时间,以最新的为准(历史数据没有)',
    `low_quality_rate` double        COMMENT '低质率(和播放量一样,每天更新一次)',
    `short_title`      array<string> COMMENT '标题集合'
)
PARTITIONED BY (`dt` string)
STORED AS PARQUET;
二、新增字段
-- Add a new column, then attach its comment with CHANGE COLUMN.
-- Fix: the original qualified only the second statement with a database name,
-- so the two statements could hit different tables when the current database
-- is not beexxx -- both are now qualified consistently.
-- Tip: ADD COLUMNS also accepts an inline COMMENT, so this can be one step:
--   ALTER TABLE beexxx.base_xxx_d_material_hive ADD COLUMNS (user_tag_list array<string> COMMENT '...');
ALTER TABLE beexxx.base_xxx_d_material_hive ADD COLUMNS (user_tag_list array<string>);
ALTER TABLE beexxx.base_xxx_d_material_hive CHANGE COLUMN user_tag_list user_tag_list array<string> COMMENT '用户标签结构化的数据,数组类型';
-- Add several columns in a single ALTER statement.
ALTER TABLE test_xxx_biz_d_mp_tag_audit_data_tbl ADD COLUMNS (
    main_tag_info_list_short        array<string> COMMENT '主标签-简洁版(hanliwei-add)',
    content_tag_info_list_short     array<string> COMMENT '内容标签-简洁版(hanliwei-add)',
    nlp_main_tag_info_list_short    array<string> COMMENT 'nlp主标签-简洁版(hanliwei-add)',
    nlp_content_tag_info_list_short array<string> COMMENT 'nlp内容标签-简洁版(hanliwei-add)',
    key_tag_list_short              array<string> COMMENT '重点标签-简洁版(hanliwei-add)'
);
-- Add a column to a partitioned table so existing partitions see it at once.
-- CASCADE propagates the schema change to the metadata of every existing
-- partition; without it old partitions keep returning NULL for the new column
-- until rewritten, and only newly created partitions pick it up.
ALTER TABLE open_platform.xxx_biz_d_mp_tag_audit_data_tbl ADD COLUMNS (author_content_tag_info_list_short array<string>) CASCADE;
-- Fix: the original CHANGE COLUMN omitted CASCADE, so the comment/metadata
-- update would not reach existing partitions -- added for consistency with
-- the ADD COLUMNS statement above.
ALTER TABLE open_platform.xxx_biz_d_mp_tag_audit_data_tbl CHANGE COLUMN author_content_tag_info_list_short author_content_tag_info_list_short array<string> COMMENT '用户标签-简洁版(xxx-add)' CASCADE;
三、删除字段
-- Dropping a column: Hive has no DROP COLUMN. Instead, REPLACE COLUMNS with
-- the full list of columns to KEEP -- here user_tag_list is simply omitted
-- from the list, which effectively deletes it.
ALTER TABLE `base_xxxx_d_material_hive` REPLACE COLUMNS (
    `qipu_id`                bigint  COMMENT '描述信息xxx',
    `first_source`           string  COMMENT '描述信息xxx',
    `cut_from`               int     COMMENT '描述信息xxxx',
    `cut_type`               double  COMMENT '描述信息xxxx',
    `reject_recommend`       boolean COMMENT '描述信息xxxx',
    `has_original_site_logo` boolean COMMENT '描述信息xxxx',
    `xxx_cate_type`          int     COMMENT '1-长 2-短 3-小'
);
四、修改字段注释
-- Update only the comment of an existing column (name and type repeated unchanged).
ALTER TABLE beexxxx.base_xxxx_d_material_hive CHANGE COLUMN user_tag user_tag string COMMENT '用户标签[字符类型],结构化的数据请使用user_tag_list';
五、删除表或数据
-- Permanently delete a test table: PURGE skips the trash directory,
-- so the data cannot be restored afterwards.
DROP TABLE xxx.base_xxx_d_material_hive_test PURGE;
-- Without PURGE the files move to the trash and can still be recovered.
DROP TABLE xxx.base_xxx_d_material_hive_test;
//truncate table 表名; -- 删除表中所有的行,但保留表结构
truncate table 用于删除表中所有的行(保留表结构), 也可用 delete from 表名 where 1 = 1(注意: Hive 中 delete 仅对开启事务的 ACID 表有效);
-- Delete the data of a single partition.
-- Fix: added IF EXISTS so a re-run does not fail when the partition
-- has already been dropped.
ALTER TABLE xxx_biz_d_mp_tag_audit_data_tbl DROP IF EXISTS PARTITION (dt='2008-07-28');
-- Batch delete: a comparison operator matches every partition below the bound.
ALTER TABLE xxx_biz_d_mp_tag_audit_data_tbl DROP IF EXISTS PARTITION (dt<'2020-10-01');
ALTER TABLE xxx_orig_d_mp_tag_audit_data_tbl DROP IF EXISTS PARTITION (dt<'2020-10-01');
六、explode和LATERAL VIEW的简单使用
参考:https://www.jianshu.com/p/8689a2283cae
-- 1. Strip the surrounding brackets/quotes from a JSON-style array string.
SELECT regexp_replace('["爱我就别想太多","曹慧生","李一桐","夏可可"]','\\["|"\\]','');
-- result: 爱我就别想太多","曹慧生","李一桐","夏可可
SELECT regexp_replace('[9000001026042900,9000001026041200,9000001026043600,9000001026056100]','\\[|\\]','');
-- result: 9000001026042900,9000001026041200,9000001026043600,9000001026056100
-- 2. Turn the stripped string into an array by splitting on commas.
SELECT split(regexp_replace('[9000001026042900,9000001026041200,9000001026043600,9000001026056100]','\\[|\\]',''),",");
-- result: ["9000001026042900","9000001026041200","9000001026043600","9000001026056100"]
-- 3. The same transformation applied to a real table column.
SELECT
    id,
    qipu_id,
    split(regexp_replace(video_ids,'\\[|\\]',''),",")
FROM beehive.test_base_beehive_d_album_audit_hive;
-- sample output:
-- 234 247921701 ["6104181288969800","5367793040785600","5406297001473500","1141181189251800","5730372003942100","3629499648811200"]
-- 592 253091801 ["17006224200","17129812000"]
-- 244 3080818208933901 ["9000001000368700"]
-- 4. explode() turns each array into one row per element.
SELECT explode(split(regexp_replace(video_ids,'\\[|\\]',''),",")) AS video_id
FROM beehive.test_base_xxx_d_album_audit_hive;
-- sample output:
-- 5406297001473500
-- 1141181189251800
-- 5730372003942100
-- 3629499648811200
-- 5. LATERAL VIEW + explode keeps other columns alongside the exploded
--    values (a bare explode() cannot select additional columns).
SELECT qipu_id, video_id
FROM beehive.test_base_xxx_d_album_audit_hive t
LATERAL VIEW explode(split(regexp_replace(t.video_ids,'\\[|\\]',''),",")) tmp_tab AS video_id;
-- sample output:
-- 247921701 6104181288969800
-- 247921701 5367793040785600
-- 247921701 5406297001473500
-- 247921701 1141181189251800
-- 247921701 5730372003942100
-- 247921701 3629499648811200
-- 253091801 17006224200
-- 253091801 17129812000
-- 3080818208933901 9000001000368700
-- The same exploded result filtered by partition and joined to a quality table.
SELECT
    t1.qipu_id,
    t1.video_id,
    t2.quality_level
FROM (
    SELECT qipu_id, video_id
    FROM beehive.test_base_xxx_d_album_audit_hive t
    LATERAL VIEW explode(split(regexp_replace(t.video_ids,'\\[|\\]',''),",")) tmp_tab AS video_id
    WHERE t.dt = '2022-04-01'
) t1
LEFT JOIN xxx.base_xxx_d_material_hive_test t2
    ON t1.video_id = t2.qipu_id;
七、insert插入数据
- 注意:如果我们想给新增的hive表添加一些测试数据,可以用此部分的内容。
-- Overwrite a single partition with the join result.
-- In a scheduled job the partition value would be templated:
--   ... PARTITION (dt='${dt}')
INSERT OVERWRITE TABLE beehive.test_base_xxx_d_album_quality_level_hive PARTITION (dt='2022-04-01')
SELECT
    t1.qipu_id,
    t1.video_id,
    t2.quality_level
FROM (
    SELECT qipu_id, video_id
    FROM beehive.test_base_xxx_d_album_audit_hive t
    LATERAL VIEW explode(split(regexp_replace(t.video_ids,'\\[|\\]',''),",")) tmp_tab AS video_id
) t1
LEFT JOIN beehive.base_xxxx_d_material_hive_test t2
    ON t1.video_id = t2.qipu_id;
八、case when的例子
参考:https://blog.csdn.net/u011944141/article/details/79133692
-- Conditional count: CASE WHEN inside COUNT counts only matching rows,
-- because non-matching rows yield NULL and COUNT ignores NULLs.
SELECT
    album_qipu_id,
    count(1) AS total_count,
    count(CASE WHEN video_quality_level = 'LOW' THEN 1 END) AS low_quality_num
FROM xxx.test_base_xxx_d_album_quality_level_hive
GROUP BY album_qipu_id;
九、其他例子
-- Step 1: strip the leading [" and trailing "] pair.
SELECT regexp_replace('["爱我就别想太多","曹慧生","李一桐","夏可可"]','\\["|"\\]','');
-- result: 爱我就别想太多","曹慧生","李一桐","夏可可
-- Step 2: replace the "," separators with a marker that cannot appear in the data.
SELECT regexp_replace(regexp_replace('["爱我就别想太多","曹慧生","李一桐","夏可可"]','\\["|"\\]',''),'","','%%');
-- result: 爱我就别想太多%%曹慧生%%李一桐%%夏可可
-- Step 3: split on the marker to rebuild a clean array.
SELECT split(regexp_replace(regexp_replace('["爱我就别想太多","曹慧生","李一桐","夏可可"]','\\["|"\\]',''),'","','%%'),'%%');
-- result: ["爱我就别想太多","曹慧生","李一桐","夏可可"]
-- Single-element input works the same way.
SELECT split(regexp_replace(regexp_replace('["爱我就别想太多"]','\\["|"\\]',''),'","','%%'),'%%');
-- result: ["爱我就别想太多"]
-- Chinese-character regex filtering: drop a '<CJK prefix>-' ahead of each value.
SELECT regexp_replace('["娱乐","电视剧-香港","新闻","电影-周边","张柏芝"]','"[\\u4E00-\\u9FA5]+-','"');
-- result: ["娱乐","香港","新闻","周边","张柏芝"]
-- Equivalent without a unicode range: match any run of characters that is
-- neither '-' nor '"' followed by '-'.
SELECT regexp_replace('["娱乐","电视剧-香港","新闻","周边","电影-张柏芝"]','"[^-"]+-','"');
-- result: ["娱乐","香港","新闻","周边","张柏芝"]
-- Truncate decimals to at most 3 places: capture '0.' plus up to three
-- digits, then drop the remaining digits via backreference $1.
SELECT regexp_replace('0.9902856349945068,0.9902856349945068','(0\\.[0-9]{1,3})[0-9]+','$1');
-- result: 0.990,0.990
SELECT regexp_replace('0.9902856349945068','(0\\.[0-9]{1,3})[0-9]+','$1');
-- result: 0.990
十、get_json_object()的用法
https://blog.csdn.net/qq_34105362/article/details/80454697
https://sjq597.github.io/2015/11/05/Hive-get-json-object%E7%94%A8%E6%B3%95/
https://www.cnblogs.com/drjava/p/10486134.html
https://zhuanlan.zhihu.com/p/40914513
-- Flatten per-video NLP tag JSON into comma-joined tag-name and confidence columns.
-- Fix: the outer SELECT referenced qixx_id / paxx_url, but the inner subquery
-- aliases those columns as qipu_id / page_url -- the original statement would
-- fail with an invalid column reference; corrected to match the aliases.
-- NOTE(review): get_json_xpath is not a built-in Hive UDF -- presumably a
-- project-registered UDF extracting JSON lists via an XPath-like path; confirm.
insert overwrite table test_nlp_tag_confidence_tbl_hanliwei
select
qipu_id,
page_url,
-- Tag names: strip '<CJK category>-' prefixes and the [ " ] punctuation; when
-- the main-tag list is empty ('[]') use only the content-tag list, otherwise
-- concatenate main + ',' + content.
if(nlp_main_tag_info_list_short='[]',regexp_replace(regexp_replace(nlp_content_tag_info_list_short,'"[\\u4E00-\\u9FA5]+-','"'),'\\["|"\\]|"',''),concat(regexp_replace(regexp_replace(nlp_main_tag_info_list_short,'"[\\u4E00-\\u9FA5]+-','"'),'\\["|"\\]|"',''),',',regexp_replace(regexp_replace(nlp_content_tag_info_list_short,'"[\\u4E00-\\u9FA5]+-','"'),'\\["|"\\]|"',''))) as nlp_tag_info,
-- Confidences: same main/content merge, then truncate each value to 3 decimals.
if(nlp_main_tag_confidence_list_short='[]',regexp_replace(regexp_replace(nlp_content_tag_confidence_list_short,'\\[|\\]',''),'(0\\.[0-9]{1,3})[0-9]+','$1'),regexp_replace(concat(regexp_replace(nlp_main_tag_confidence_list_short,'\\[|\\]',''),',',regexp_replace(nlp_content_tag_confidence_list_short,'\\[|\\]','')),'(0\\.[0-9]{1,3})[0-9]+','$1')) as nlp_tag_confidence,
algorithm_version
from (
-- Inner query: pull id, url, tag-name lists, confidence lists and the
-- algorithm version out of the JSON payloads for one day's partition.
select get_json_object(video_info, '$.qixxId') as qipu_id,
get_json_object(video_info, '$.paxxUrl') as page_url,
get_json_xpath(nlp_tag_info, '$.mainCategoryTagInfoList[*].tagName') as nlp_main_tag_info_list_short,
get_json_xpath(nlp_tag_info, '$.contentTagInfoList[*].tagName') as nlp_content_tag_info_list_short,
get_json_xpath(nlp_tag_info, '$.mainCategoryTagInfoList[*].confidence') as nlp_main_tag_confidence_list_short,
get_json_xpath(nlp_tag_info, '$.contentTagInfoList[*].confidence') as nlp_content_tag_confidence_list_short,
get_json_xpath(nlp_tag_info, '$.algorithmVersion') as algorithm_version
from xxx_orig_d_xxx_tag_audit_data_tbl
where dt = '2020-07-30'
) t;
//结果:
179xxx49600 http://www.xx.com/v_xxrz4uov64.html 综艺,搞笑,小品,片段,周云鹏 0.992,0.603,0.603,0.944,0.970
1758xxx8700 http://www.xxx.com/v_1xxxzgq4wn4.html 资讯,新型冠状病毒,社会,灾难意外,抗洪救灾,鄱阳县 0.986,0.986,0.971,0.837,0.825,0.805
1795xxx9700 http://www.xxx.com/v_1xxxxlvs.html 儿童,玩具,母婴,幼儿,试玩,植物大战僵尸,模型玩具,益智 0.991,0.991,0.631,0.631,0.625,0.899,0.787,0.799