DataX in Practice: Extracting Data from Hive to MySQL (with a Complete JSON Configuration)



1. Requirement
Extract data from the ads layer of the offline Hive data warehouse into MySQL.


2. Reference: The Official DataX Examples on GitHub
DataX repository: https://github.com/alibaba/DataX

Reading data from Hive (hdfsreader → streamwriter):

{
    "job": {
        "setting": {
            "speed": {
                "channel": 3
            }
        },
        "content": [
            {
                "reader": {
                    "name": "hdfsreader",
                    "parameter": {
                        "path": "/user/hive/warehouse/mytable01/*",
                        "defaultFS": "hdfs://xxx:port",
                        "column": [
                               {
                                "index": 0,
                                "type": "long"
                               },
                               {
                                "index": 1,
                                "type": "boolean"
                               },
                               {
                                "type": "string",
                                "value": "hello"
                               },
                               {
                                "index": 2,
                                "type": "double"
                               }
                        ],
                        "fileType": "orc",
                        "encoding": "UTF-8",
                        "fieldDelimiter": ","
                    }

                },
                "writer": {
                    "name": "streamwriter",
                    "parameter": {
                        "print": true
                    }
                }
            }
        ]
    }
}
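
Since the sample above writes to streamwriter, it simply prints the rows it reads to stdout, which makes it a convenient smoke test for HDFS connectivity before wiring in mysqlwriter. A minimal sketch of running it, assuming DataX is unpacked under /opt/module/datax and the config is saved as hdfs2stream.json (both paths are assumptions):

# Run the hdfsreader -> streamwriter sample; the rows printed to stdout
# confirm that path, fileType and fieldDelimiter are configured correctly.
python /opt/module/datax/bin/datax.py /opt/module/datax/job/hdfs2stream.json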

Writing to MySQL (streamreader → mysqlwriter):

{
    "job": {
        "setting": {
            "speed": {
                "channel": 1
            }
        },
        "content": [
            {
                 "reader": {
                    "name": "streamreader",
                    "parameter": {
                        "column" : [
                            {
                                "value": "DataX",
                                "type": "string"
                            },
                            {
                                "value": 19880808,
                                "type": "long"
                            },
                            {
                                "value": "1988-08-08 08:08:08",
                                "type": "date"
                            },
                            {
                                "value": true,
                                "type": "bool"
                            },
                            {
                                "value": "test",
                                "type": "bytes"
                            }
                        ],
                        "sliceRecordCount": 1000
                    }
                },
                "writer": {
                    "name": "mysqlwriter",
                    "parameter": {
                        "writeMode": "insert",
                        "username": "root",
                        "password": "root",
                        "column": [
                            "id",
                            "name"
                        ],
                        "session": [
                        	"set session sql_mode='ANSI'"
                        ],
                        "preSql": [
                            "delete from test"
                        ],
                        "connection": [
                            {
                                "jdbcUrl": "jdbc:mysql://127.0.0.1:3306/datax?useUnicode=true&characterEncoding=gbk",
                                "table": [
                                    "test"
                                ]
                            }
                        ]
                    }
                }
            }
        ]
    }
}
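
Before this sample can run, the test table referenced by jdbcUrl and preSql has to exist, and mysqlwriter maps fields positionally, so its column list must match the reader's output in count and order (as written, the reader emits five columns while the writer lists only two, which would need reconciling). A minimal sketch of creating the table, with an assumed schema:

# Create the database and table that the sample's jdbcUrl and preSql point at.
# The schema below is an assumption made purely for illustration.
mysql -uroot -proot -e "
  CREATE DATABASE IF NOT EXISTS datax;
  CREATE TABLE IF NOT EXISTS datax.test (id BIGINT, name VARCHAR(255));"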

HA configuration (hadoopConfig for an HDFS NameNode HA cluster):

 "hadoopConfig":{
         "dfs.nameservices": "testDfs",
         "dfs.ha.namenodes.testDfs": "namenode1,namenode2",
         "dfs.namenode.rpc-address.aliDfs.namenode1": "",
         "dfs.namenode.rpc-address.aliDfs.namenode2": "",
         "dfs.client.failover.proxy.provider.testDfs": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
 }
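
These keys should mirror the cluster's hdfs-site.xml. If you are unsure of the values, they can be read back with hdfs getconf (testDfs below is just the placeholder nameservice from the example):

# Read the HA-related properties from the active Hadoop client configuration.
hdfs getconf -confKey dfs.nameservices
hdfs getconf -confKey dfs.ha.namenodes.testDfs
hdfs getconf -confKey dfs.namenode.rpc-address.testDfs.namenode1
hdfs getconf -confKey dfs.namenode.rpc-address.testDfs.namenode2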


3. Final JSON Configuration

{
    "job": {
        "setting": {
            "speed": {
                "channel": 3
            }
        },
        "content": [
            {
                "reader": {
                    "name": "hdfsreader",
                    "parameter": {
                        "path": "hdfs://mycluster/user/hive/warehouse/ads.db/ads_paper_avgtimeandscore/dt=${dt}/dn=${dn}",
                        "defaultFS": "hdfs://mycluster",
                        "hadoopConfig":{
                            "dfs.nameservices": "mycluster",
                            "dfs.ha.namenodes.mycluster": "nn1,nn2,nn3",
                            "dfs.namenode.rpc-address.mycluster.nn1": "hadoop101:8020",
                            "dfs.namenode.rpc-address.mycluster.nn2": "hadoop102:8020",
                            "dfs.namenode.rpc-address.mycluster.nn3": "hadoop103:8020",
                            "dfs.client.failover.proxy.provider.mycluster": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
                        },
                        "column": [
                               {
                                "index": 0,
                                "type": "string"
                               },
                               {
                                "index": 1,
                                "type": "string"
                               },
                               {
                                "index": 2,
                                "type": "string"
                               },
                               {
                                "index": 3,
                                "type": "string"
                               },
                               {
                                "value": "${dt}",
                                "type": "string"
                               },
                               {    
                                "value": "${dn}",
                                "type": "string"
                               }
                        ],
                        "fileType": "text",
                        "encoding": "UTF-8",
                        "fieldDelimiter": "\t"
                
                    }

                },
                "writer": {
                    "name": "mysqlwriter",
                    "parameter": {
                        "writeMode": "insert",
                        "username": "root",
                        "password": "123456",
                        "column": [
                            "paperviewid",
                            "paperviewname",
                            "avgscore",
                            "avgspendtime",
                            "dt",
                            "dn"
                        ],
                        "preSql": [
                            "delete from paper_avgtimeandscore where dt=${dt}"
                        ],
                        "connection": [
                            {
                                "jdbcUrl": "jdbc:mysql://hadoop101:3306/qz_paper?useUnicode=true&characterEncoding=utf8",
                                "table": [
                                    "paper_avgtimeandscore"
                                ]
                            }
                        ]
                    }
                }
            }
        ]
    }
}
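
Before running the job it is worth confirming that the source partition exists and that the files really are tab-delimited text, since fileType and fieldDelimiter must match what Hive wrote. A quick check, using the partition values from this article:

# List the partition directory and peek at a few rows to verify the
# '\t' delimiter and the column order expected by hdfsreader.
hdfs dfs -ls /user/hive/warehouse/ads.db/ads_paper_avgtimeandscore/dt=20190722/dn=webA
hdfs dfs -cat /user/hive/warehouse/ads.db/ads_paper_avgtimeandscore/dt=20190722/dn=webA/* | head -n 5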

4. Execution Script

./datax.py ./work_json/paper_avgtimeandscore.json -p "-Ddt=20190722 -Ddn=webA"
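
The -p option supplies values for the ${dt} and ${dn} placeholders in the JSON. In a scheduled pipeline the date would normally come from the scheduler rather than being typed by hand; a minimal wrapper sketch, where the install path and the "yesterday" convention are assumptions:

#!/bin/bash
# Run the DataX job for yesterday's partition of the webA site.
dt=$(date -d "-1 day" +%Y%m%d)
dn=webA
python /opt/module/datax/bin/datax.py \
  -p "-Ddt=${dt} -Ddn=${dn}" \
  /opt/module/datax/work_json/paper_avgtimeandscore.json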


5. Execution Result
[Screenshot: DataX job run summary]


6. Note
Before loading data into MySQL, create the target table first; otherwise the job will fail with a "table not found" error.

CREATE TABLE paper_avgtimeandscore(
  paperviewid INT,
  paperviewname VARCHAR(100),
  avgscore decimal(4,1),
  avgspendtime decimal(10,1),
  dt VARCHAR(100),
  dn VARCHAR(100)
  );
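
After the job finishes, a quick count against the target table confirms the load; the connection details below are the ones from the writer config, and the query itself is only an illustration:

# Verify that rows landed for the partition that was just loaded.
mysql -h hadoop101 -uroot -p123456 -e \
  "SELECT COUNT(*) FROM qz_paper.paper_avgtimeandscore WHERE dt='20190722' AND dn='webA';"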