Implementing database and table sharding with the Sharding-JDBC middleware

I. Horizontal Splitting

1. Horizontal database sharding
1) Concept: using a chosen field as the sharding key, the data of one database is split across multiple databases according to a routing strategy.
2) Result: every database has the same schema; each database holds different rows; the union of all the databases is the full data set.

2. Horizontal table sharding
1) Concept: using a chosen field as the sharding key, the data of one table is split across multiple tables according to a routing strategy.
2) Result: every table has the same structure; each table holds different rows; the union of all the tables is the full data set.

II. Sharding-JDBC Middleware

1. Architecture diagram

2. Features
1) Sharding-JDBC wraps the JDBC API directly, so migrating existing code costs almost nothing.
2) It works with any Java-based ORM framework, such as Hibernate or MyBatis.
3) It can sit on top of any third-party connection pool, such as DBCP, C3P0, BoneCP, or Druid.
4) It is shipped as a plain jar: there is no proxy layer, no extra deployment, and no additional dependencies.
5) Sharding strategies are flexible, supporting =, BETWEEN, and IN conditions as well as multiple sharding keys (see the sketch after this list for a BETWEEN example).
6) SQL parsing is fairly complete, covering aggregation, GROUP BY, ORDER BY, LIMIT, OR, and similar queries.
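The demo later in this article only registers PreciseShardingAlgorithm implementations, which handle = and IN on the sharding key. To also support BETWEEN, a RangeShardingAlgorithm can be registered alongside the precise one. The sketch below is an assumption about how that might look for the hash-sharded tables used later (class name TableOneRangeAlg is hypothetical, and the interface signature is the one used in the 3.x API); because routing is hash based, a range cannot be narrowed, so the algorithm simply returns every candidate table and Sharding-JDBC merges the results:

/**
 * Hedged sketch: range algorithm for a hash-sharded table.
 * A BETWEEN condition on a hashed key cannot be pruned, so all
 * candidate physical tables are returned (scatter query).
 */
public class TableOneRangeAlg implements RangeShardingAlgorithm<String> {

    @Override
    public Collection<String> doSharding(Collection<String> names, RangeShardingValue<String> value) {
        // names already holds the physical candidates, e.g. table_one_1 .. table_one_5
        return names;
    }
}

Assuming the three-argument constructor, it would be registered together with the precise algorithm, e.g. new StandardShardingStrategyConfiguration("phone", new TableOneAlg(), new TableOneRangeAlg()).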

III. Project Demo

1. Project structure

Spring Boot     2.0
Druid           1.1.13
Sharding-JDBC   3.1

2. Database configuration

One base database is mapped directly (shard_one).

Two databases are used for the actual sharding (shard_two, shard_three).
The sharded tables are table_one and table_two.

3. Core code

1) Data source configuration file

spring:
  datasource:
    # Data source: shard_one
    dataOne:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_one?useUnicode=true&characterEncoding=UTF8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 900000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # Data source: shard_two
    dataTwo:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_two?useUnicode=true&characterEncoding=UTF8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 900000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
    # Data source: shard_three
    dataThree:
      type: com.alibaba.druid.pool.DruidDataSource
      druid:
        driverClassName: com.mysql.jdbc.Driver
        url: jdbc:mysql://localhost:3306/shard_three?useUnicode=true&characterEncoding=UTF8&zeroDateTimeBehavior=convertToNull&useSSL=false
        username: root
        password: 123
        initial-size: 10
        max-active: 100
        min-idle: 10
        max-wait: 60000
        pool-prepared-statements: true
        max-pool-prepared-statement-per-connection-size: 20
        time-between-eviction-runs-millis: 60000
        min-evictable-idle-time-millis: 300000
        max-evictable-idle-time-millis: 900000
        validation-query: SELECT 1 FROM DUAL
        # validation-query-timeout: 5000
        test-on-borrow: false
        test-on-return: false
        test-while-idle: true
        connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
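The dataOne / dataTwo / dataThree prefixes above are custom, so Spring Boot does not create these pools automatically; the corresponding Druid bean definitions are omitted in the article (they are in the project source). A minimal sketch of what one such binding might look like, assuming the property keys above are mapped onto DruidDataSource setters via @ConfigurationProperties; the bean name dataOneSource matches the parameter name used in the sharding configuration below:

@Configuration
public class DruidDataSourceConfig {

    /**
     * Binds the spring.datasource.dataOne.druid.* keys onto a DruidDataSource.
     * The prefix is written in canonical kebab-case; relaxed binding maps
     * data-one onto the dataOne key in the YAML. The dataTwoSource and
     * dataThreeSource beans would follow the same pattern with their own prefixes.
     */
    @Bean("dataOneSource")
    @ConfigurationProperties(prefix = "spring.datasource.data-one.druid")
    public DruidDataSource dataOneSource() {
        return new DruidDataSource();
    }
}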

2) Database sharding strategy

/**
 * Database routing algorithm: maps a sharding value to a physical database.
 */
public class DataSourceAlg implements PreciseShardingAlgorithm<String> {

    private static final Logger LOG = LoggerFactory.getLogger(DataSourceAlg.class);

    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        LOG.debug("Database sharding parameters {}, {}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 2 yields 0 or 1, so the target is always ds_2 or ds_3
        return "ds_" + ((hash % 2) + 2);
    }
}
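HashUtil is a project-specific helper that the article does not show. Judging by the name, rsHash is presumably the classic RS string hash; the sketch below is an assumption used for illustration rather than the project's actual implementation — the only property the sharding code relies on is that it returns a stable, non-negative int for a given string:

public class HashUtil {

    /**
     * Classic RS string hash (Robert Sedgewick), masked to a non-negative int
     * so that the % 2 and % 5 routing above never produces a negative index.
     */
    public static int rsHash(String str) {
        int b = 378551;
        int a = 63689;
        long hash = 0;
        for (int i = 0; i < str.length(); i++) {
            hash = hash * a + str.charAt(i);
            a = a * b;
        }
        return (int) (hash & 0x7FFFFFFF);
    }
}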

3) Table sharding strategy for table_one

/**
 * Table sharding algorithm for table_one.
 */
public class TableOneAlg implements PreciseShardingAlgorithm<String> {

    private static final Logger LOG = LoggerFactory.getLogger(TableOneAlg.class);

    /**
     * The logical table is split into 5 physical tables per database.
     */
    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        LOG.debug("Table sharding parameters {}, {}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 5 yields 0..4, so the target is table_one_1 .. table_one_5
        return "table_one_" + (hash % 5 + 1);
    }
}

4) Table sharding strategy for table_two

/**
 * Table sharding algorithm for table_two.
 */
public class TableTwoAlg implements PreciseShardingAlgorithm<String> {

    private static final Logger LOG = LoggerFactory.getLogger(TableTwoAlg.class);

    /**
     * The logical table is split into 5 physical tables per database.
     */
    @Override
    public String doSharding(Collection<String> names, PreciseShardingValue<String> value) {
        LOG.debug("Table sharding parameters {}, {}", names, value);
        int hash = HashUtil.rsHash(String.valueOf(value.getValue()));
        // hash % 5 yields 0..4, so the target is table_two_1 .. table_two_5
        return "table_two_" + (hash % 5 + 1);
    }
}
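Putting the two levels together: for a given phone value, the database algorithm picks ds_2 or ds_3 and the table algorithm picks one of five tables, so the same phone always resolves to the same physical node. A small illustration of the combined routing, using the hypothetical HashUtil sketched above (so the printed targets are illustrative only):

public class RoutingDemo {

    public static void main(String[] args) {
        for (String phone : new String[]{"13700000001", "13700000002", "13700000003"}) {
            int hash = HashUtil.rsHash(phone);
            // Same formulas as DataSourceAlg and TableOneAlg
            String target = "ds_" + ((hash % 2) + 2) + ".table_one_" + (hash % 5 + 1);
            System.out.println(phone + " -> " + target);
        }
    }
}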

5) Combined data source configuration

/**
 * Database and table sharding configuration.
 */
@Configuration
public class ShardJdbcConfig {

    // The Druid data source beans are omitted here; see the project source.

    /**
     * Sharding-JDBC database sharding configuration.
     */
    @Bean
    public DataSource dataSource(@Autowired DruidDataSource dataOneSource,
                                 @Autowired DruidDataSource dataTwoSource,
                                 @Autowired DruidDataSource dataThreeSource) throws Exception {
        ShardingRuleConfiguration shardJdbcConfig = new ShardingRuleConfiguration();
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule01());
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule02());
        // Tables without a sharding rule fall back to the default data source ds_0
        shardJdbcConfig.setDefaultDataSourceName("ds_0");
        Map<String, DataSource> dataMap = new LinkedHashMap<>();
        dataMap.put("ds_0", dataOneSource);
        dataMap.put("ds_2", dataTwoSource);
        dataMap.put("ds_3", dataThreeSource);
        Properties prop = new Properties();
        return ShardingDataSourceFactory.createDataSource(dataMap, shardJdbcConfig, new HashMap<>(), prop);
    }

    /**
     * Sharding-JDBC table sharding configuration.
     */
    private static TableRuleConfiguration getTableRule01() {
        TableRuleConfiguration result = new TableRuleConfiguration();
        result.setLogicTable("table_one");
        result.setActualDataNodes("ds_${2..3}.table_one_${1..5}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableOneAlg()));
        return result;
    }

    private static TableRuleConfiguration getTableRule02() {
        TableRuleConfiguration result = new TableRuleConfiguration();
        result.setLogicTable("table_two");
        result.setActualDataNodes("ds_${2..3}.table_two_${1..5}");
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableTwoAlg()));
        return result;
    }
}
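The DataSource returned by this bean replaces the application's regular data source, so whatever sits on top of it (MyBatis, JdbcTemplate, and so on) keeps issuing SQL against the logical table names, and Sharding-JDBC rewrites and routes each statement by the phone column; tables with no rule go to the default ds_0. A minimal usage sketch, assuming a JdbcTemplate built on this data source and a hypothetical user_name column (the real table_one schema lives in the project source):

@Service
public class TableOneDemoService {

    @Resource
    private JdbcTemplate jdbcTemplate; // backed by the sharded DataSource above

    /**
     * SQL is written against the logical table "table_one"; Sharding-JDBC
     * routes it to ds_2/ds_3 and table_one_1..5 based on the phone value.
     */
    public void save(String phone, String name) {
        jdbcTemplate.update("INSERT INTO table_one (phone, user_name) VALUES (?, ?)", phone, name);
    }

    public Map<String, Object> findByPhone(String phone) {
        return jdbcTemplate.queryForMap("SELECT * FROM table_one WHERE phone = ?", phone);
    }
}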

6) Test endpoints walking through the flow

@RestController
public class ShardController {

    @Resource
    private ShardService shardService;

    /**
     * 1. Create the physical tables
     */
    @RequestMapping("/createTable")
    public String createTable() {
        shardService.createTable();
        return "success";
    }

    /**
     * 2. Generate data for table_one
     */
    @RequestMapping("/insertOne")
    public String insertOne() {
        shardService.insertOne();
        return "SUCCESS";
    }

    /**
     * 3. Generate data for table_two
     */
    @RequestMapping("/insertTwo")
    public String insertTwo() {
        shardService.insertTwo();
        return "SUCCESS";
    }

    /**
     * 4. Query table_one data
     */
    @RequestMapping("/selectOneByPhone/{phone}")
    public TableOne selectOneByPhone(@PathVariable("phone") String phone) {
        return shardService.selectOneByPhone(phone);
    }

    /**
     * 5. Query table_two data
     */
    @RequestMapping("/selectTwoByPhone/{phone}")
    public TableTwo selectTwoByPhone(@PathVariable("phone") String phone) {
        return shardService.selectTwoByPhone(phone);
    }
}

This article is shared from the WeChat public account 知了一笑 (cicada_smile).
