notes notes
首页
读书笔记
系统设计
项目实战
学习笔记
源码
运维
其它
极客时间 (opens new window)
GitHub (opens new window)
首页
读书笔记
系统设计
项目实战
学习笔记
源码
运维
其它
极客时间 (opens new window)
GitHub (opens new window)
  • 并发编程

    • 并发编程
    • 多线程
    • 高级篇
  • 设计模式

    • 设计模式
  • 网络编程

    • Netty

      • NIO基础
      • Netty入门
      • Netty进阶
      • 优化与源码
  • 源码篇

    • 环境搭建
    • Spring
  • 云原生

    • Kubernetes
    • Helm
  • ElasticSearch

    • ElasticSearch
  • Java 虚拟机

    • 深入拆解 Java 虚拟机
    • JVM与GC调优
  • MQ

    • RabbitMQ

      • RabbitMQ笔记
      • RabbitMQ集群搭建文档
  • Redis

    • Redis进阶
  • ShardingSphere

    • Sharding-JDBC
      • 分库
        • 建库
        • 结构
        • 代码
        • yaml 配置
        • 测试广播表
        • 测试分库
      • 分表
      • 分库分表
      • 读写分离
        • 环境搭建
        • 编写 docker-compose
        • 创建所需目录
        • 编写主库配置文件
        • 编写从库配置文件
        • 启动docker-compose
        • docker ps查看是否正常启动
        • 创建用户
        • 查看主库状态
        • 从库设置主库信息
        • 开启主从复制
        • 查看从库同步状态
        • 配置文件
      • 整合 Seata
  • SpringCloud

    • SpringCloud
  • ZooKeeper

    • ZooKeeper
  • 学习笔记
  • ShardingSphere
starry
2023-08-03
目录

Sharding-JDBC

https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/spring-boot-starter/ (opens new window) 只是简单使用的话,配置下yaml文件即可

# 分库

# 建库

/*
Navicat MySQL Data Transfer

Source Server         : localhost
Source Server Version : 50734
Source Host           : localhost:3306
Source Database       : demo_ds_0

Target Server Type    : MYSQL
Target Server Version : 50734
File Encoding         : 65001

Date: 2022-06-08 22:38:13
*/

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for t_address
-- Broadcast table: an identical copy exists in every data source
-- (demo_ds_0 and demo_ds_1); inserts are routed to all of them.
-- ----------------------------
DROP TABLE IF EXISTS `t_address`;
CREATE TABLE `t_address` (
  `address_id` bigint(11) NOT NULL AUTO_INCREMENT,
  `address_name` varchar(50) NOT NULL,
  PRIMARY KEY (`address_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534544128980316163 DEFAULT CHARSET=utf8mb4;

-- ----------------------------
-- Table structure for t_order
-- Database-sharded table: a row lands in demo_ds_0 or demo_ds_1
-- according to user_id % 2 (see the inline sharding algorithm in the
-- yaml config below). order_id is generated by the SNOWFLAKE key generator.
-- ----------------------------
DROP TABLE IF EXISTS `t_order`;
CREATE TABLE `t_order` (
  `order_id` bigint(20) NOT NULL AUTO_INCREMENT,
  `user_id` bigint(20) NOT NULL,
  PRIMARY KEY (`order_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534534001644576771 DEFAULT CHARSET=utf8mb4;

-- ----------------------------
-- Table structure for t_order_item
-- Database-sharded table; bound to t_order (binding-tables) so joins on
-- the sharding key stay inside one data source. Structure must stay
-- identical across demo_ds_0 and demo_ds_1.
-- ----------------------------
DROP TABLE IF EXISTS `t_order_item`;
CREATE TABLE `t_order_item` (
  `order_item_id` bigint(20) NOT NULL AUTO_INCREMENT,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `status` varchar(50) DEFAULT NULL,
  PRIMARY KEY (`order_item_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534538773588094978 DEFAULT CHARSET=utf8mb4;

/*
Navicat MySQL Data Transfer

Source Server         : localhost
Source Server Version : 50734
Source Host           : localhost:3306
Source Database       : demo_ds_1

Target Server Type    : MYSQL
Target Server Version : 50734
File Encoding         : 65001

Date: 2022-06-08 22:38:26
*/

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for t_address
-- Broadcast table: must stay structurally identical to demo_ds_0.t_address.
-- ----------------------------
DROP TABLE IF EXISTS `t_address`;
CREATE TABLE `t_address` (
  `address_id` bigint(11) NOT NULL AUTO_INCREMENT,
  `address_name` varchar(50) NOT NULL,
  PRIMARY KEY (`address_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534543709537333251 DEFAULT CHARSET=utf8mb4;

-- ----------------------------
-- Table structure for t_order
-- Database-sharded table (user_id % 2); mirrors demo_ds_0.t_order.
-- ----------------------------
DROP TABLE IF EXISTS `t_order`;
CREATE TABLE `t_order` (
  `order_id` bigint(20) NOT NULL AUTO_INCREMENT,
  `user_id` bigint(20) NOT NULL,
  PRIMARY KEY (`order_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534533984896720898 DEFAULT CHARSET=utf8mb4;

-- ----------------------------
-- Table structure for t_order_item
-- Database-sharded table, bound to t_order.
-- Fix: column order was (user_id, order_id), the reverse of
-- demo_ds_0.t_order_item. Sharded/bound tables must have completely
-- identical structure in every data source (see the note on
-- actual-data-nodes in the yaml config); a positional INSERT would
-- otherwise write user_id into order_id on this shard.
-- ----------------------------
DROP TABLE IF EXISTS `t_order_item`;
CREATE TABLE `t_order_item` (
  `order_item_id` bigint(20) NOT NULL AUTO_INCREMENT,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `status` varchar(50) DEFAULT NULL,
  PRIMARY KEY (`order_item_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1534538730751668227 DEFAULT CHARSET=utf8mb4;

# 结构

demo_ds_0
├── t_address			地址,广播表
├── t_order				订单,分库存放
└── t_order_item	订单-商品,分库存放(订单表和订单-商品表绑定到一起,绑定表)
demo_ds_1
├── t_address
├── t_order
└── t_order_item

# 代码

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.starry</groupId>
    <artifactId>sharding-sphere-demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>sharding-sphere-demo</name>
    <description>Demo project for Spring Boot</description>

    <properties>
        <java.version>1.8</java.version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <spring-boot.version>2.3.7.RELEASE</spring-boot.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
<!--        <dependency>-->
<!--            <groupId>org.apache.shardingsphere</groupId>-->
<!--            <artifactId>shardingsphere-transaction-base-seata-at</artifactId>-->
<!--            <version>5.1.1</version>-->
<!--        </dependency>-->
        <dependency>
            <groupId>org.apache.shardingsphere</groupId>
            <artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
            <version>5.1.1</version>
        </dependency>

        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>3.4.2</version>
        </dependency>

        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>

    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-dependencies</artifactId>
                <version>${spring-boot.version}</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.8.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <!-- Reuse the version declared once in <properties>; a hard-coded
                     2.3.7.RELEASE here could silently drift from the imported
                     spring-boot-dependencies BOM on upgrade. -->
                <version>${spring-boot.version}</version>
                <configuration>
                    <mainClass>com.starry.shardingspheredemo.ShardingSphereDemoApplication</mainClass>
                </configuration>
                <executions>
                    <execution>
                        <id>repackage</id>
                        <goals>
                            <goal>repackage</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

# yaml 配置

spring:
  profiles:
    # 分库
    #include: sharding-databases
    # 分表
    include: sharding-tables
server:
  port: 8080
spring:
  shardingsphere:
    # 数据源信息
    datasource:
      # 数据源名称
      names: ds-0,ds-1
      # 具体数据源配置
      ds-0:
        jdbc-url: jdbc:mysql://localhost:3306/demo_ds_0?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
      ds-1:
        jdbc-url: jdbc:mysql://localhost:3306/demo_ds_1?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
    # 分片规则
    rules:
      sharding:
        # 默认分库策略
        default-database-strategy:
          # 标准分片
          standard:
            # 列名
            sharding-column: user_id
            # 分库策略名
            sharding-algorithm-name: my-algorithm


        # 指定表配置
        tables:
          t_order:
            # 由数据源名 + 表名组成,以小数点分隔。
            # 多个表以逗号分隔,支持 inline 表达式。
            # 缺省表示使用已知数据源与逻辑表名称生成数据节点,用于广播表(即每个库中都需要一个同样的表用于关联查询,多为字典表)
            # 或只分库不分表且所有库的表结构完全一致的情况
            actual-data-nodes: ds-$->{0..1}.t_order
            key-generate-strategy:
              column: order_id
              key-generate-algorithm-name: snowflake
          t_order_item:
            actual-data-nodes: ds-$->{0..1}.t_order_item
            key-generate-strategy:
              column: order_item_id
              key-generate-algorithm-name: snowflake
          t_address:
            # Broadcast table nodes: every data source holds a t_address table.
            # Fix: this was "t_access", which matches no table in the DDL —
            # the schema creates t_address in both demo_ds_0 and demo_ds_1.
            actual-data-nodes: ds-$->{0..1}.t_address
            key-generate-strategy:
              column: address_id
              key-generate-algorithm-name: uuid

        # 分片策略
        sharding-algorithms:
          # 对应自定义的策略名
          my-algorithm:
            # 使用 inline 行表达式
            type: INLINE
            props:
              # 策略表达式
              algorithm-expression: ds-$->{user_id % 2}

        # key 生成策略
        key-generators:
          # 对应自定义的策略名
          snowflake:
            type: SNOWFLAKE
          uuid:
            type: UUID


        # 绑定表,指分片规则一致的一组分片表。
        # 使用绑定表进行多表关联查询时,必须使用分片键进行关联,否则会出现笛卡尔积关联或跨库关联,从而影响查询效率。
        # 其中 t_order 表由于指定了分片条件,ShardingSphere 将会以它作为整个绑定表的主表。
        # 所有路由计算将会只使用主表的策略,那么 t_order_item 表的分片计算将会使用 t_order 的条件。
        binding-tables:
          - t_order,t_order_item

        # 广播表
        # 指所有的分片数据源中都存在的表,表结构及其数据在每个数据库中均完全一致。
        # 适用于数据量不大且需要与海量数据的表进行关联查询的场景,例如:字典表。
        broadcast-tables: t_address


    # 其他属性配置
    props:
      sql-show: true
  • 配置数据源
  • 配置默认的分库策略
  • 配置列的 id 生成算法
  • 配置分片策略
  • 配置 id 生成策略
  • 配置绑定表
  • 配置广播表
  • 显示 sql

controller

# 测试广播表

@RestController
public class AddressController {

    // MyBatis-Plus style service for the broadcast table t_address.
    // NOTE(review): @Resource injects by field name first, then by type —
    // keep the field name in sync with the service bean name.
    @Resource
    AddressService addressService;

    /**
     * Inserts an address. t_address is a broadcast table, so the write is
     * routed to every data source (the log output below shows the same
     * INSERT executed on both ds-0 and ds-1). Always returns "success";
     * per the note below, a partial failure on one data source is not
     * reported as an error.
     */
    @PostMapping("address")
    public String addAddress(@RequestBody Address address) {
        addressService.save(address);
        return "success";
    }

    /**
     * Looks up an address by id. Reads of a broadcast table are routed to a
     * single data source chosen at random (not round-robin — see the query
     * logs below).
     */
    @GetMapping("address/{addressId}")
    public Address getAddress(@PathVariable Long addressId) {
        return addressService.getById(addressId);
    }

}

添加数据

POST http://localhost:8080/address
Content-Type: application/json

{
  "addressName": "北京"
}

所有数据库都添加

注意:如果一个数据库未添加成功,也不会报错

2022-06-08 22:54:33.696  INFO 43912 --- [nio-8080-exec-5] ShardingSphere-SQL                       : Logic SQL: INSERT INTO t_address  ( address_id, address_name )  VALUES  ( ?, ? )
2022-06-08 22:54:33.696  INFO 43912 --- [nio-8080-exec-5] ShardingSphere-SQL                       : SQLStatement: MySQLInsertStatement(setAssignment=Optional.empty, onDuplicateKeyColumns=Optional.empty)
2022-06-08 22:54:33.696  INFO 43912 --- [nio-8080-exec-5] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: INSERT INTO t_address  ( address_id, address_name )  VALUES  (?, ?) ::: [1534549448309895169, 北京]
2022-06-08 22:54:33.696  INFO 43912 --- [nio-8080-exec-5] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: INSERT INTO t_address  ( address_id, address_name )  VALUES  (?, ?) ::: [1534549448309895169, 北京]

查询数据

GET http://localhost:8080/address/1534549448309895169

可以看到执行顺序并不是轮询,而是随机

2022-06-08 22:59:15.398  INFO 43912 --- [io-8080-exec-10] ShardingSphere-SQL                       : Logic SQL: SELECT address_id,address_name FROM t_address WHERE address_id=? 
2022-06-08 22:59:15.399  INFO 43912 --- [io-8080-exec-10] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 22:59:15.399  INFO 43912 --- [io-8080-exec-10] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: SELECT address_id,address_name FROM t_address WHERE address_id=?  ::: [1534549448309895169]
2022-06-08 22:59:21.011  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : Logic SQL: SELECT address_id,address_name FROM t_address WHERE address_id=? 
2022-06-08 22:59:21.011  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 22:59:21.011  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: SELECT address_id,address_name FROM t_address WHERE address_id=?  ::: [1534549448309895169]
2022-06-08 22:59:24.920  INFO 43912 --- [nio-8080-exec-2] ShardingSphere-SQL                       : Logic SQL: SELECT address_id,address_name FROM t_address WHERE address_id=? 
2022-06-08 22:59:24.920  INFO 43912 --- [nio-8080-exec-2] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 22:59:24.920  INFO 43912 --- [nio-8080-exec-2] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: SELECT address_id,address_name FROM t_address WHERE address_id=?  ::: [1534549448309895169]

# 测试分库

@RestController
public class OrderController {

    // MyBatis-Plus style service for the database-sharded table t_order.
    @Resource
    OrderService orderService;

    /**
     * Inserts an order. The row is routed by the default database strategy
     * (user_id % 2): even user_id to ds-0, odd to ds-1. order_id is filled
     * in by the SNOWFLAKE key generator, not by the client.
     */
    @PostMapping("order")
    public String addOrder(@RequestBody Order order) {
        orderService.save(order);
        return "success";
    }

    /**
     * Looks up an order by id. order_id is not the database sharding key,
     * so ShardingSphere cannot narrow the route and queries every data
     * source (see the paired ds-0/ds-1 SELECTs in the logs below).
     */
    @GetMapping("order/{orderId}")
    public Order getOrder(@PathVariable Long orderId) {
        return orderService.getById(orderId);
    }

}

添加数据

###
POST http://localhost:8080/order
Content-Type: application/json

{
  "userId": "5"
}

###
POST http://localhost:8080/order
Content-Type: application/json

{
  "userId": "6"
}

###
POST http://localhost:8080/order
Content-Type: application/json

{
  "userId": "2"
}

根据传入的 userId 添加到不同的库,偶数添加到 ds-0、奇数添加到 ds-1;并且使用雪花算法,自动生成了 id

2022-06-08 23:04:42.075  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : Logic SQL: INSERT INTO t_order  ( order_id,
user_id )  VALUES  ( ?,
? )
2022-06-08 23:04:42.075  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : SQLStatement: MySQLInsertStatement(setAssignment=Optional.empty, onDuplicateKeyColumns=Optional.empty)
2022-06-08 23:04:42.075  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: INSERT INTO t_order  ( order_id,
user_id )  VALUES  (?, ?) ::: [1534551999591772161, 5]
2022-06-08 23:08:40.888  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : Logic SQL: INSERT INTO t_order  ( order_id,
user_id )  VALUES  ( ?,
? )
2022-06-08 23:08:40.888  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : SQLStatement: MySQLInsertStatement(setAssignment=Optional.empty, onDuplicateKeyColumns=Optional.empty)
2022-06-08 23:08:40.888  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: INSERT INTO t_order  ( order_id,
user_id )  VALUES  (?, ?) ::: [1534553001732632577, 6]
2022-06-08 23:09:00.979  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : Logic SQL: INSERT INTO t_order  ( order_id,
user_id )  VALUES  ( ?,
? )
2022-06-08 23:09:00.979  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : SQLStatement: MySQLInsertStatement(setAssignment=Optional.empty, onDuplicateKeyColumns=Optional.empty)
2022-06-08 23:09:00.980  INFO 43912 --- [nio-8080-exec-1] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: INSERT INTO t_order  ( order_id,
user_id )  VALUES  (?, ?) ::: [1534553085945868290, 2]

查询数据

###
GET http://localhost:8080/order/1534553085945868290

###
GET http://localhost:8080/order/1534553001732632577

###
GET http://localhost:8080/order/1534551999591772161

orderId 不是分库的分片键(分片键是 userId),查询条件中没有分片键时无法定位到具体的库,所以会路由到所有的库查询

2022-06-08 23:13:42.121  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : Logic SQL: SELECT order_id,user_id FROM t_order WHERE order_id=? 
2022-06-08 23:13:42.121  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 23:13:42.121  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534553085945868290]
2022-06-08 23:13:42.121  INFO 43912 --- [nio-8080-exec-7] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534553085945868290]
2022-06-08 23:13:45.427  INFO 43912 --- [nio-8080-exec-8] ShardingSphere-SQL                       : Logic SQL: SELECT order_id,user_id FROM t_order WHERE order_id=? 
2022-06-08 23:13:45.427  INFO 43912 --- [nio-8080-exec-8] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 23:13:45.427  INFO 43912 --- [nio-8080-exec-8] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534553001732632577]
2022-06-08 23:13:45.427  INFO 43912 --- [nio-8080-exec-8] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534553001732632577]
2022-06-08 23:13:46.918  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : Logic SQL: SELECT order_id,user_id FROM t_order WHERE order_id=? 
2022-06-08 23:13:46.918  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : SQLStatement: MySQLSelectStatement(table=Optional.empty, limit=Optional.empty, lock=Optional.empty, window=Optional.empty)
2022-06-08 23:13:46.918  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : Actual SQL: ds-0 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534551999591772161]
2022-06-08 23:13:46.918  INFO 43912 --- [nio-8080-exec-9] ShardingSphere-SQL                       : Actual SQL: ds-1 ::: SELECT order_id,user_id FROM t_order WHERE order_id=?  ::: [1534551999591772161]
@RestController
public class OrderItemController {

    // MyBatis-Plus style service for t_order_item, which is bound to
    // t_order via binding-tables in the sharding config.
    @Resource
    OrderItemService orderItemService;

    /**
     * Inserts an order item; routed by the default database strategy on
     * user_id, with order_item_id generated by the SNOWFLAKE key generator.
     */
    @PostMapping("orderItem")
    public String addOrderItem(@RequestBody OrderItem orderItem) {
        orderItemService.save(orderItem);
        return "success";
    }

    /**
     * Looks up an order item by id. order_item_id is not the database
     * sharding key, so the query is broadcast to all data sources.
     */
    @GetMapping("orderItem/{orderItemId}")
    public OrderItem getOrderItem(@PathVariable Long orderItemId) {
        return orderItemService.getById(orderItemId);
    }

}

# 分表

表结构

demo_ds
├── t_order_0			
├── t_order_1
├── t_order_item_0			
└── t_order_item_1
spring:
  shardingsphere:
    # 数据源信息
    datasource:
      # 数据源名称
      names: ds
      # 具体数据源配置
      ds:
        jdbc-url: jdbc:mysql://localhost:3306/demo_ds?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
    # 分片规则
    rules:
      sharding:
        # 指定表配置
        tables:
          t_order:
            # 实际表
            actual-data-nodes: ds.t_order_$->{0..1}
            # id 生成策略
            key-generate-strategy:
              column: order_id
              key-generate-algorithm-name: snowflake
            # 分片策略
            table-strategy:
              standard:
                sharding-column: order_id
                sharding-algorithm-name: t-order-inline
          t_order_item:
            actual-data-nodes: ds.t_order_item_$->{0..1}
            key-generate-strategy:
              column: order_item_id
              key-generate-algorithm-name: snowflake
            table-strategy:
              standard:
                sharding-column: order_item_id
                sharding-algorithm-name: t-order-item-inline

        # 分片策略
        sharding-algorithms:
          # 对应自定义的策略名
          t-order-inline:
            # 使用 inline 行表达式
            type: INLINE
            props:
              # 策略表达式
              algorithm-expression: t_order_$->{order_id % 2}
          t-order-item-inline:
            type: INLINE
            props:
              algorithm-expression: t_order_item_$->{order_item_id % 2}

        # key 生成策略
        key-generators:
          # 对应自定义的策略名
          snowflake:
            type: SNOWFLAKE


        # 绑定表,指分片规则一致的一组分片表。
        # 使用绑定表进行多表关联查询时,必须使用分片键进行关联,否则会出现笛卡尔积关联或跨库关联,从而影响查询效率。
        # 其中 t_order 表由于指定了分片条件,ShardingSphere 将会以它作为整个绑定表的主表。
        # 所有路由计算将会只使用主表的策略,那么 t_order_item 表的分片计算将会使用 t_order 的条件。
        binding-tables:
          - t_order,t_order_item

        # 广播表
        # 指所有的分片数据源中都存在的表,表结构及其数据在每个数据库中均完全一致。
        # 适用于数据量不大且需要与海量数据的表进行关联查询的场景,例如:字典表。
        broadcast-tables: t_address


    # 其他属性配置
    props:
      sql-show: true


  • 配置单个数据源
  • 给要分片的表配置分片策略、id 生成策略

修改要激活的配置文件,使用原来的 controller 测试即可

# 分库分表

表结构

demo_ds_11
├── t_order_0			
├── t_order_1
├── t_order_item_0			
└── t_order_item_1
demo_ds_12
├── t_order_0			
├── t_order_1
├── t_order_item_0			
└── t_order_item_1
spring:
  shardingsphere:
    # 数据源信息
    datasource:
      # 数据源名称
      names: ds-0,ds-1
      # 具体数据源配置
      ds-0:
        jdbc-url: jdbc:mysql://localhost:3306/demo_ds_11?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
      ds-1:
        jdbc-url: jdbc:mysql://localhost:3306/demo_ds_12?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
    # 分片规则
    rules:
      sharding:
        # 默认分库策略
        default-database-strategy:
          standard:
            sharding-column: user_id
            sharding-algorithm-name: my-algorithm


        # 指定表配置
        tables:
          t_order:
            actual-data-nodes: ds-$->{0..1}.t_order_$->{0..1}
            key-generate-strategy:
              column: order_id
              key-generate-algorithm-name: snowflake
            table-strategy:
              standard:
                sharding-column: order_id
                sharding-algorithm-name: t-order-inline

          t_order_item:
            actual-data-nodes: ds-$->{0..1}.t_order_item_$->{0..1}
            key-generate-strategy:
              column: order_item_id
              key-generate-algorithm-name: snowflake
            table-strategy:
              standard:
                sharding-column: order_item_id
                sharding-algorithm-name: t-order-item-inline

        # 分片策略
        sharding-algorithms:
          my-algorithm:
            type: INLINE
            props:
              algorithm-expression: ds-$->{user_id % 2}
          t-order-inline:
            type: INLINE
            props:
              algorithm-expression: t_order_$->{order_id % 2}
          t-order-item-inline:
            type: INLINE
            props:
              algorithm-expression: t_order_item_$->{order_item_id % 2}

        # key 生成策略
        key-generators:
          snowflake:
            type: SNOWFLAKE
          uuid:
            type: UUID


        # 绑定表,指分片规则一致的一组分片表。
        binding-tables:
          - t_order,t_order_item

        # 广播表
        broadcast-tables: t_address


    # 其他属性配置
    props:
      sql-show: true


  • 配置多个数据源
  • 配置默认数据库分片规则
  • 配置表分片规则、id 生成策略

# 读写分离

# 环境搭建

https://dev.mysql.com/doc/refman/5.7/en/environment-variables.html (opens new window) https://hub.docker.com/_/mysql (opens new window) https://docs.docker.com/engine/reference/commandline/compose/ (opens new window)

docker 配置,一主两从

# 编写 docker-compose

version: '3.0'
services:
  mysql_master:
    image: mysql:5.7
    container_name: mysql_master
    restart: always
    ports:
      - '3306:3306'
    environment:
      MYSQL_ROOT_PASSWORD: root
      TZ: Asia/Shanghai
    volumes:
      - /mydata/mysql_master/data:/var/lib/mysql
      - /mydata/mysql_master/conf/my.cnf:/etc/mysql/my.cnf
  mysql_slave_0:
    image: mysql:5.7
    container_name: mysql_slave_0
    restart: always
    ports:
      - '3307:3306'
    environment:
      MYSQL_ROOT_PASSWORD: root
      TZ: Asia/Shanghai
    volumes:
      - /mydata/mysql_slave_0/data:/var/lib/mysql
      - /mydata/mysql_slave_0/conf/my.cnf:/etc/mysql/my.cnf
  mysql_slave_1:
    image: mysql:5.7
    container_name: mysql_slave_1
    restart: always
    ports:
      - '3308:3306'
    environment:
      MYSQL_ROOT_PASSWORD: root
      TZ: Asia/Shanghai
    volumes:
      - /mydata/mysql_slave_1/data:/var/lib/mysql
      - /mydata/mysql_slave_1/conf/my.cnf:/etc/mysql/my.cnf

# 创建所需目录

[root@localhost ~]# rm -rf /mydata
[root@localhost ~]# mkdir -p /mydata/mysql_master/data
[root@localhost ~]# mkdir -p /mydata/mysql_master/conf
[root@localhost ~]# mkdir -p /mydata/mysql_slave_0/data
[root@localhost ~]# mkdir -p /mydata/mysql_slave_0/conf
[root@localhost ~]# mkdir -p /mydata/mysql_slave_1/data
[root@localhost ~]# mkdir -p /mydata/mysql_slave_1/conf

# 编写主库配置文件

[root@localhost ~]# vim /mydata/mysql_master/conf/my.cnf
[mysqld]
## 同一局域网内注意要唯一
server-id=100  
## 开启二进制日志功能,可以随便取(关键)
log-bin=mysql-bin

# 编写从库配置文件

[root@localhost ~]# vim /mydata/mysql_slave_0/conf/my.cnf
[mysqld]
## 同一局域网内注意要唯一
server-id=200
## 开启二进制日志功能,可以随便取(关键)
log-bin=mysql-bin
## 设置为只读,该项如果不设置,表示slave可读可写
read_only= 1
[root@localhost ~]# vim /mydata/mysql_slave_1/conf/my.cnf
[mysqld]
## 同一局域网内注意要唯一
server-id=300
## 开启二进制日志功能,可以随便取(关键)
log-bin=mysql-bin
## 设置为只读,该项如果不设置,表示slave可读可写
read_only= 1

# 启动docker-compose

docker-compose -f docker-compose-mysql.yml up -d

# docker ps查看是否正常启动

[root@localhost data]# docker ps
CONTAINER ID   IMAGE       COMMAND                  CREATED         STATUS         PORTS                                                            NAMES
85fd71ec96ba   mysql:5.7   "docker-entrypoint.s…"   6 minutes ago   Up 6 minutes   33060/tcp, 0.0.0.0:3308->3306/tcp, :::3308->3306/tcp             mysql_slave_1
51c6cfbc2993   mysql:5.7   "docker-entrypoint.s…"   6 minutes ago   Up 6 minutes   33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp             mysql_slave_0
f6557f395d0f   mysql:5.7   "docker-entrypoint.s…"   6 minutes ago   Up 6 minutes   0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp             mysql_master

# 创建用户

使用客户端工具连接,在master上创建主从复制权限的用户

# 创建一个带有主从复制权限的用户 用户名slave 密码123321
grant replication slave on *.* to 'slave'@'%' identified by '123321';
# 刷新权限
flush privileges;

# 查看主库状态

mysql> show master status;
+------------------+----------+--------------+------------------+-------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+------------------+----------+--------------+------------------+-------------------+
| mysql-bin.000005 |     1134 |              |                  |                   |
+------------------+----------+--------------+------------------+-------------------+
1 row in set (0.00 sec)

当前使用到的binlog文件和偏移量

# 从库设置主库信息

mysql> change master to
    -> master_host='192.168.83.130',
    -> master_user='slave',
    -> master_password='123321',
    -> master_log_file='mysql-bin.000005',
    -> master_log_pos=1134;
Query OK, 0 rows affected, 2 warnings (0.01 sec)

# 开启主从复制

mysql> start slave;
Query OK, 0 rows affected (0.00 sec)

# 查看从库同步状态

mysql> show slave status\G;
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: 192.168.83.130
                  Master_User: slave
                  Master_Port: 3306
                Connect_Retry: 60
              Master_Log_File: mysql-bin.000005
          Read_Master_Log_Pos: 1134
               Relay_Log_File: f81999cf0278-relay-bin.000002
                Relay_Log_Pos: 320
        Relay_Master_Log_File: mysql-bin.000005
             Slave_IO_Running: Yes
            Slave_SQL_Running: Yes
            ......

另外一个从库也是相同的操作

  • 从库设置主库信息
  • 开启主从复制
  • 查看从库同步状态

主库新建数据库、新建表、添加数据 等写操作,从库也会相应的变化

# 配置文件

表结构

readwrite-ds		【write】
└── t_order
readwrite-ds		【read】
└── t_order
readwrite-ds		【read】
└── t_order
spring:
  shardingsphere:
    # 数据源信息
    datasource:
      # 数据源名称
      names: write-ds,read-ds-0,read-ds-1
      # 具体数据源配置
      write-ds:
        jdbc-url: jdbc:mysql://192.168.83.130:3306/readwrite-ds?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
      read-ds-0:
        jdbc-url: jdbc:mysql://192.168.83.130:3307/readwrite-ds?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root
      read-ds-1:
        # Second read replica (mysql_slave_1).
        # Fix: port was 3307, identical to read-ds-0 — both "replicas" would
        # point at mysql_slave_0 and all reads would hit one node. The
        # docker-compose above maps mysql_slave_1 to host port 3308.
        jdbc-url: jdbc:mysql://192.168.83.130:3308/readwrite-ds?serverTimezone=UTC&useSSL=false&useUnicode=true&characterEncoding=utf8
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        username: root
        password: root

    # 分片规则
    rules:
      readwrite-splitting:
        data-sources:
          # 逻辑数据源名称
          readwrite_ds:
            # 读写分离类型 Static,Dynamic
            type: Static
            props:
              # 写库数据源
              write-data-source-name: write-ds
              # 读库数据源
              read-data-source-names: read-ds-0,read-ds-1
            # 负载均衡算法名称
            load-balancer-name: round_robin
        load-balancers:
          # 负载均衡算法名称
          round_robin:
            type: ROUND_ROBIN






    # 其他属性配置
    props:
      sql-show: true
  • 配置数据源
  • 配置读写库
  • 配置负载均衡策略

# 整合 Seata

shardingsphere官方示例 (opens new window),springboot2.3.x使用5.x的shardingsphere会整合失败Cannot subclass final class org.apache.shardingsphere.driver.jdbc.core.datasource.ShardingSphereDataSource;在springboot2.5.x按照官方示例没问题

  • https://github.com/seata/seata/issues/3850 (opens new window)
  • https://github.com/apache/shardingsphere/issues/13550 (opens new window)
  • https://github.com/apache/shardingsphere/issues/15086 (opens new window)

seata 官方示例 (opens new window),seata官方也提供了整合的示例,但是是低版本的4.1.1

使用AT模式 seata配置笔记 (opens new window)

使用nacos作为注册和配置中心,启动nacos

bin/startup.cmd

配置seata,修改2个配置,使用nacos,使用db存储逆向sql,数据库建表(seata-server所需的表)

  • registry.conf

seata-server(TC)注册到nacos,seata-client(TM、RM)直接去nacos拿到TC的地址 server使用file的配置方式

# registry.conf — tells the Seata server (TC) how to register itself with a
# service registry; the companion `config` block below tells it where to load
# its own settings from. Only the section matching `type` is read.
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "nacos"

  # Active section: register the TC as "seata-server" with the local Nacos so
  # that clients (TM/RM) can discover the TC address through Nacos.
  nacos {
    application = "seata-server"
    serverAddr = "127.0.0.1:8848"
    group = "SEATA_GROUP"
    namespace = ""
    cluster = "default"
    username = ""
    password = ""
  }
  # The remaining sections are ignored while type = "nacos".
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = 0
    password = ""
    cluster = "default"
    timeout = 0
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

# Where the Seata server loads its own configuration from.
# Only the section matching `type` is read.
config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"

  # The sections below are ignored while type = "file".
  nacos {
    serverAddr = "127.0.0.1:8848"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
    dataId = "seataServer.properties"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  apollo {
    appId = "seata-server"
    ## apolloConfigService will cover apolloMeta
    apolloMeta = "http://192.168.1.204:8801"
    apolloConfigService = "http://192.168.1.204:8080"
    namespace = "application"
    apolloAccesskeySecret = ""
    cluster = "seata"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
    nodePath = "/seata/seata.properties"
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  # Active section: settings are read from the local file.conf.
  file {
    name = "file.conf"
  }
}

  • file.conf 使用db作为逆向sql存储的地方,配置虚拟组名称,配置数据源
# file.conf — Netty transport settings for the Seata server (TC).
transport {
  # tcp udt unix-domain-socket
  type = "TCP"
  #NIO NATIVE
  server = "NIO"
  #enable heartbeat
  heartbeat = true
  #thread factory for netty
  # Thread names/sizes for the Netty boss, worker and business-handler pools.
  thread-factory {
    boss-thread-prefix = "NettyBoss"
    worker-thread-prefix = "NettyServerNIOWorker"
    server-executor-thread-prefix = "NettyServerBizHandler"
    share-boss-worker = false
    client-selector-thread-prefix = "NettyClientSelector"
    client-selector-thread-size = 1
    client-worker-thread-prefix = "NettyClientWorkerThread"
    # netty boss thread size,will not be used for UDT
    boss-thread-size = 1
    #auto default pin or 8
    worker-thread-size = 8
  }
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  # Wire serialization and compression for RPC payloads.
  serialization = "seata"
  compressor = "none"
}
# Transaction-group routing: clients configured with
# tx-service-group = "my_test_tx_group" are mapped to the TC cluster "default".
service {
  #vgroup->rgroup
  vgroupMapping.my_test_tx_group = "default"
  #only support single node
  # Only consulted when the registry type is "file"; with Nacos the TC address
  # is discovered through the registry instead.
  default.grouplist = "127.0.0.1:8091"
  #degrade current not support
  enableDegrade = false
  #disable
  disable = false
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  max.commit.retry.timeout = "-1"
  max.rollback.retry.timeout = "-1"
}

# Client-side (TM/RM) defaults for async commit buffering and lock retries.
client {
  async.commit.buffer.limit = 10000
  lock {
    # NOTE(review): "internal" reads like a typo for "interval", but it matches
    # the historical Seata key name — verify against the Seata version in use
    # before renaming.
    retry.internal = 10
    retry.times = 30
  }
  report.retry.count = 5
}

## transaction log store
store {
  ## store mode: file、db
  # "db" makes the TC persist global/branch sessions and locks in MySQL;
  # the `file` section below is then ignored.
  mode = "db"

  ## file store
  file {
    dir = "sessionStore"

    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    max-branch-session-size = 16384
    # globe session size , if exceeded throws exceptions
    max-global-session-size = 512
    # file buffer size , if exceeded allocate new buffer
    file-write-buffer-cache-size = 16384
    # when recover batch read size
    session.reload.read_size = 100
    # async, sync
    flush-disk-mode = async
  }

  ## database store
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/h2/oceanbase etc.
    db-type = "mysql"
    url = "jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true"
    user = "root"
    password = "root"
    min-conn = 1
    max-conn = 3
    # These three tables must already exist in the `seata` schema — see the
    # DDL script below (global_table / branch_table / lock_table).
    global.table = "global_table"
    branch.table = "branch_table"
    lock-table = "lock_table"
    query-limit = 100
  }
}
# Where AT-mode row locks are kept: "remote" stores them on the Seata server
# (in lock_table when store.mode = "db").
lock {
  ## the lock store mode: local、remote
  mode = "remote"

  local {
    ## store locks in user's database
  }

  remote {
    ## store locks in the seata's server
  }
}
# Intervals (seconds) for the TC's background retry/recovery schedulers.
# NOTE(review): "asyn-committing" is the upstream spelling of this key — do
# not rename it without checking the config keys of the Seata version in use.
recovery {
  committing-retry-delay = 30
  asyn-committing-retry-delay = 30
  rollbacking-retry-delay = 30
  timeout-retry-delay = 30
}

# AT-mode undo-log handling: validate current data against the before-image
# on rollback, and serialize undo logs with Jackson.
transaction {
  undo.data.validation = true
  undo.log.serialization = "jackson"
}

## metrics settings
# Disabled here; when enabled, metrics are exposed via the Prometheus exporter
# on port 9898.
metrics {
  enabled = false
  registry-type = "compact"
  # multi exporters use comma divided
  exporter-list = "prometheus"
  exporter-prometheus-port = 9898
}
  • seata-server所需表
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
-- One row per global transaction, keyed by the global transaction id (xid).
CREATE TABLE IF NOT EXISTS `global_table`
(
    `xid`                       VARCHAR(128) NOT NULL,
    `transaction_id`            BIGINT,
    `status`                    TINYINT      NOT NULL,
    `application_id`            VARCHAR(32),
    `transaction_service_group` VARCHAR(32),
    `transaction_name`          VARCHAR(128),
    `timeout`                   INT,
    `begin_time`                BIGINT,
    `application_data`          VARCHAR(2000),
    `gmt_create`                DATETIME,
    `gmt_modified`              DATETIME,
    PRIMARY KEY (`xid`),
    -- supports the TC's periodic scans for timed-out / finished transactions
    KEY `idx_status_gmt_modified` (`status` , `gmt_modified`),
    KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- the table to store BranchSession data
-- One row per branch (per participating resource) of a global transaction;
-- idx_xid lets the TC fetch all branches of one global transaction quickly.
CREATE TABLE IF NOT EXISTS `branch_table`
(
    `branch_id`         BIGINT       NOT NULL,
    `xid`               VARCHAR(128) NOT NULL,
    `transaction_id`    BIGINT,
    `resource_group_id` VARCHAR(32),
    `resource_id`       VARCHAR(256),
    `branch_type`       VARCHAR(8),
    `status`            TINYINT,
    `client_id`         VARCHAR(64),
    `application_data`  VARCHAR(2000),
    `gmt_create`        DATETIME(6),
    `gmt_modified`      DATETIME(6),
    PRIMARY KEY (`branch_id`),
    KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- the table to store lock data
-- AT-mode global row locks held by the TC; row_key identifies one locked row.
CREATE TABLE IF NOT EXISTS `lock_table`
(
    `row_key`        VARCHAR(128) NOT NULL,
    `xid`            VARCHAR(128),
    `transaction_id` BIGINT,
    `branch_id`      BIGINT       NOT NULL,
    `resource_id`    VARCHAR(256),
    `table_name`     VARCHAR(32),
    `pk`             VARCHAR(36),
    `status`         TINYINT      NOT NULL DEFAULT '0' COMMENT '0:locked ,1:rollbacking',
    `gmt_create`     DATETIME,
    `gmt_modified`   DATETIME,
    PRIMARY KEY (`row_key`),
    KEY `idx_status` (`status`),
    -- locks are released per branch / per global transaction, hence both keys
    KEY `idx_branch_id` (`branch_id`),
    KEY `idx_xid_and_branch_id` (`xid` , `branch_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- Coordination locks ensuring that, in a TC cluster, only one server node
-- runs each background retry task at a time.
CREATE TABLE IF NOT EXISTS `distributed_lock`
(
    `lock_key`       CHAR(20) NOT NULL,
    `lock_value`     VARCHAR(20) NOT NULL,
    `expire`         BIGINT,
    primary key (`lock_key`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8mb4;

-- Seed the coordinator locks used by the TC's background retry schedulers.
-- INSERT IGNORE keeps this script idempotent (matching the CREATE TABLE IF NOT
-- EXISTS statements above): re-running it no longer fails on duplicate PKs.
INSERT IGNORE INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('AsyncCommitting', ' ', 0);
INSERT IGNORE INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('RetryCommitting', ' ', 0);
INSERT IGNORE INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('RetryRollbacking', ' ', 0);
INSERT IGNORE INTO `distributed_lock` (lock_key, lock_value, expire) VALUES ('TxTimeoutCheck', ' ', 0);
  • 在每一个分片数据库实例中执行创建 undo_log 表
-- Per-shard undo log for Seata AT mode: this table must be created in EVERY
-- business database instance that participates in a global transaction.
-- Each row stores the before/after image needed to roll back one branch.
CREATE TABLE IF NOT EXISTS `undo_log`
(
  `id`            BIGINT(20)   NOT NULL AUTO_INCREMENT COMMENT 'increment id',
  `branch_id`     BIGINT(20)   NOT NULL COMMENT 'branch transaction id',
  -- widened from VARCHAR(100) to 128 to match the `xid` columns of
  -- global_table / branch_table / lock_table defined above
  `xid`           VARCHAR(128) NOT NULL COMMENT 'global transaction id',
  `context`       VARCHAR(128) NOT NULL COMMENT 'undo_log context,such as serialization',
  `rollback_info` LONGBLOB     NOT NULL COMMENT 'rollback info',
  `log_status`    INT(11)      NOT NULL COMMENT '0:normal status,1:defense status',
  `log_created`   DATETIME     NOT NULL COMMENT 'create datetime',
  `log_modified`  DATETIME     NOT NULL COMMENT 'modify datetime',
  PRIMARY KEY (`id`),
  -- one undo record per (global transaction, branch) pair
  UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE = InnoDB
  AUTO_INCREMENT = 1
  -- utf8mb4 instead of the deprecated 3-byte `utf8` alias, consistent with
  -- every other table in this script
  DEFAULT CHARSET = utf8mb4 COMMENT ='AT transaction mode undo table';

启动seata

bin/seata-server.bat

(opens new window)

上次更新: 2024/03/03, 08:36:37
Redis进阶
SpringCloud

← Redis进阶 SpringCloud→

Theme by Vdoing | Copyright © 2023-2024 Starry | MIT License
  • 跟随系统
  • 浅色模式
  • 深色模式
  • 阅读模式