1. Background: why cache consistency is needed
2. Multi-level cache architecture design
3. How MySQL and Redis stay consistent
The same principles apply to MySQL paired with Redis, ES, or MongoDB.
First query:
1. Query the second-level cache (Redis) first; if the data is not in Redis, query MySQL.
2. When querying MySQL, if the data exists, write it into the Redis cache.
Second query:
1. If the data is found in Redis, MySQL is not queried at all.
This takes read pressure off the database.
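A minimal sketch of this read path in Java, using the RedisUtil helper defined later in this section; UserDao is a hypothetical interface standing in for the MySQL access layer:

// Cache-aside read: Redis first, fall back to MySQL, then backfill the cache.
public class CacheAsideReader {
    interface UserDao { String findById(String id); }   // hypothetical DAO

    private final UserDao userDao;

    public CacheAsideReader(UserDao userDao) { this.userDao = userDao; }

    public String getUser(String id) {
        String cached = RedisUtil.stringGet(id);   // first lookup: Redis
        if (cached != null) {
            return cached;                         // cache hit: MySQL is never queried
        }
        String fromDb = userDao.findById(id);      // cache miss: query MySQL
        if (fromDb != null) {
            RedisUtil.stringSet(id, fromDb);       // backfill Redis for the next query
        }
        return fromDb;
    }
}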
Key write-path solutions:
Every write to MySQL must be propagated to Redis (see the sketch below):
- insert: write the new row into Redis (incremental sync)
- update: delete the key from Redis, so the next read reloads it (lazy loading)
- update: or write the new value straight into Redis (incremental sync)
- delete: delete the key from Redis (incremental sync)
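A sketch of the two update variants, again with a hypothetical UserDao and the RedisUtil helper shown later in this section:

// Two ways to keep Redis in step with a MySQL update.
public class WriteSync {
    interface UserDao { void update(String id, String json); }   // hypothetical DAO

    private final UserDao userDao;

    public WriteSync(UserDao userDao) { this.userDao = userDao; }

    // Lazy loading: update MySQL, then invalidate the key; the next read repopulates it.
    public void updateLazy(String id, String newJson) {
        userDao.update(id, newJson);
        RedisUtil.delKey(id);
    }

    // Incremental sync: update MySQL, then overwrite the cached value directly.
    public void updateIncremental(String id, String newJson) {
        userDao.update(id, newJson);
        RedisUtil.stringSet(id, newJson);
    }
}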
If the database changes, how do we propagate the change to Redis?
1. Simply clear the Redis cache (suitable for small projects).
2. Asynchronous sync through an MQ (suitable for small to mid-size projects).
3. Asynchronous sync through Canal + MQ (recommended).
The remainder of this section walks through how Canal keeps the data consistent. Two classic cache double-write consistency strategies also come up: the cache-aside pattern (the lazy-loading update shown above) and the delayed double delete strategy, sketched below.
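A minimal sketch of delayed double delete, against the same hypothetical UserDao; the 500 ms delay is an assumed value and should be tuned to exceed one read-plus-backfill round trip:

// Delayed double delete: delete the key, write MySQL, wait, delete again.
// The second delete evicts any stale value that a concurrent reader may have
// backfilled between the first delete and the database write.
public class DelayedDoubleDelete {
    interface UserDao { void update(String id, String json); }   // hypothetical DAO

    private final UserDao userDao;

    public DelayedDoubleDelete(UserDao userDao) { this.userDao = userDao; }

    public void update(String id, String newJson) throws InterruptedException {
        RedisUtil.delKey(id);          // 1. evict the cached key
        userDao.update(id, newJson);   // 2. update MySQL
        Thread.sleep(500);             // 3. wait out in-flight reads (assumed 500 ms)
        RedisUtil.delKey(id);          // 4. delete again to evict any stale backfill
    }
}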
How Canal-based sync works:
1. The Canal server disguises itself as a MySQL replica and subscribes to the binlog of the MySQL master.
2. When the Canal server receives binlog events, it converts them to JSON and sends them to the Canal client.
3. The Canal client writes the data into the NoSQL cache (Redis).
Configure the MySQL server
Locate the data directory if needed:
select @@datadir;
D:\Program Files\mysql-8.0.20-winx64
- Edit MySQL's my.ini/my.cnf to allow binlog-based master-slave replication:
log-bin=mysql-bin   # enable the binlog
binlog-format=ROW   # use ROW format
server_id=1         # required for MySQL replication; must not collide with Canal's slaveId
Restart the MySQL server after editing the file, then verify:
show variables like 'log_bin';
log_bin is OFF before the binlog is enabled and ON afterwards.
- Create a canal account (or simply use your existing root account):
drop user 'canal'@'%';
create user 'canal'@'%' identified by 'canal';
grant all privileges on *.* to 'canal'@'%' with grant option;
FLUSH PRIVILEGES;
select * from mysql.user where user='canal';  -- verify the account's privileges
Make sure the privilege columns in mysql.user all show Y.
Configure the Canal server
Edit the instance.properties configuration file under \conf\example:
canal.instance.master.address=127.0.0.1:3306
canal.instance.dbUsername=canal
canal.instance.dbPassword=canal
Run startup.bat, then check the example.log log file under \logs\example.
Create the Canal client
Maven dependencies
<dependencies>
    <dependency>
        <groupId>com.alibaba.otter</groupId>
        <artifactId>canal.client</artifactId>
        <version>1.1.0</version>
    </dependency>
    <dependency>
        <groupId>redis.clients</groupId>
        <artifactId>jedis</artifactId>
        <version>2.9.0</version>
    </dependency>
</dependencies>
Sync code
package com.mayikt.canal;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.CanalEntry.*;
import com.alibaba.otter.canal.protocol.Message;

import java.net.InetSocketAddress;
import java.util.List;

/**
 * CanalClient
 */
public class CanalClient {
    public static void main(String[] args) {
        CanalConnector connector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("127.0.0.1", 11111), "example", "", "");
        int batchSize = 100;
        try {
            connector.connect();
            // subscribe to the database/table to sync
            connector.subscribe("test.users");
            connector.rollback();
            while (true) {
                // fetch up to batchSize entries without auto-ack
                Message message = connector.getWithoutAck(batchSize);
                long batchId = message.getId();
                int size = message.getEntries().size();
                System.out.println("batchId = " + batchId);
                System.out.println("size = " + size);
                if (batchId == -1 || size == 0) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                } else {
                    printEntry(message.getEntries());
                }
                // acknowledge the batch
                connector.ack(batchId);
                // connector.rollback(batchId); // on processing failure, roll the batch back
            }
        } finally {
            connector.disconnect();
        }
    }

    private static void printEntry(List<Entry> entries) {
        for (Entry entry : entries) {
            if (entry.getEntryType() == EntryType.TRANSACTIONBEGIN || entry.getEntryType() == EntryType.TRANSACTIONEND) {
                continue;
            }
            RowChange rowChange = null;
            try {
                rowChange = RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("ERROR ## parser of eromanga-event has an error , data:" + entry.toString(),
                        e);
            }
            EventType eventType = rowChange.getEventType();
            System.out.println(String.format("================> binlog[%s:%s] , name[%s,%s] , eventType : %s",
                    entry.getHeader().getLogfileName(), entry.getHeader().getLogfileOffset(),
                    entry.getHeader().getSchemaName(), entry.getHeader().getTableName(),
                    eventType));
            for (RowData rowData : rowChange.getRowDatasList()) {
                if (eventType == EventType.DELETE) {
                    redisDelete(rowData.getBeforeColumnsList());
                } else if (eventType == EventType.INSERT) {
                    redisInsert(rowData.getAfterColumnsList());
                } else {
                    System.out.println("-------> before");
                    printColumn(rowData.getBeforeColumnsList());
                    System.out.println("-------> after");
                    redisUpdate(rowData.getAfterColumnsList());
                }
            }
        }
    }

    private static void printColumn(List<Column> columns) {
        for (Column column : columns) {
            System.out.println(column.getName() + " : " + column.getValue() + " update=" + column.getUpdated());
        }
    }

    // INSERT: cache the whole row as JSON, keyed by the first column (the primary key)
    private static void redisInsert(List<Column> columns) {
        JSONObject json = new JSONObject();
        for (Column column : columns) {
            json.put(column.getName(), column.getValue());
        }
        if (columns.size() > 0) {
            RedisUtil.stringSet(columns.get(0).getValue(), json.toJSONString());
        }
    }

    // UPDATE: overwrite the cached row under the same primary-key-based key
    private static void redisUpdate(List<Column> columns) {
        JSONObject json = new JSONObject();
        for (Column column : columns) {
            json.put(column.getName(), column.getValue());
        }
        if (columns.size() > 0) {
            RedisUtil.stringSet(columns.get(0).getValue(), json.toJSONString());
        }
    }

    // DELETE: evict the cached row
    private static void redisDelete(List<Column> columns) {
        if (columns.size() > 0) {
            RedisUtil.delKey(columns.get(0).getValue());
        }
    }
}
package com.mayikt.canal;

import redis.clients.jedis.Jedis;

// Minimal Jedis wrapper for the demo: a single, lazily created connection.
public class RedisUtil {
    private static Jedis jedis = null;

    public static synchronized Jedis getJedis() {
        if (jedis == null) {
            jedis = new Jedis("127.0.0.1", 6379);
        }
        return jedis;
    }

    public static boolean existKey(String key) {
        return getJedis().exists(key);
    }

    public static void delKey(String key) {
        getJedis().del(key);
    }

    public static String stringGet(String key) {
        return getJedis().get(key);
    }

    public static String stringSet(String key, String value) {
        return getJedis().set(key, value);
    }

    public static void hashSet(String key, String field, String value) {
        getJedis().hset(key, field, value);
    }
}
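The single shared Jedis connection above keeps the demo short, but a Jedis instance is not thread-safe. In a real project you would typically borrow connections from a JedisPool instead; a minimal sketch (same host/port as the demo):

package com.mayikt.canal;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;

// Pool-backed variant of RedisUtil (sketch): each call borrows a connection
// from the pool and returns it, which is safe under concurrent use.
public class PooledRedisUtil {
    private static final JedisPool POOL = new JedisPool("127.0.0.1", 6379);

    public static String stringSet(String key, String value) {
        try (Jedis jedis = POOL.getResource()) { // Jedis implements Closeable
            return jedis.set(key, value);
        }
    }

    public static void delKey(String key) {
        try (Jedis jedis = POOL.getResource()) {
            jedis.del(key);
        }
    }
}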
Integrate Kafka
Kafka environment
- Install ZooKeeper first:
  Rename zoo_sample.cfg to zoo.cfg
  In zoo.cfg set dataDir=E:\zkkafka\zookeeper-3.4.14\data
  Add environment variables:
  ZOOKEEPER_HOME: E:\zkkafka\zookeeper-3.4.14 (the ZooKeeper directory)
  Path: append ";%ZOOKEEPER_HOME%\bin;" to the existing value
  Start ZooKeeper with zkServer.cmd
- Install Kafka:
  Unzip kafka_2.13-2.4.0 and rename the directory to kafka
  In server.properties set
  log.dirs=E:\zkkafka\kafka\logs
  From a cmd prompt:
  cd E:\zkkafka\kafka
  .\bin\windows\kafka-server-start.bat .\config\server.properties
  Kafka should now start successfully.
Canal configuration changes
1. In example/instance.properties set the MQ topic:
canal.mq.topic=maikt-topic
2. In canal.properties set the server mode (tcp, kafka, or RocketMQ) and the broker list:
canal.serverMode = kafka
canal.mq.servers = 127.0.0.1:9092
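With serverMode set to kafka, Canal publishes each row change to the topic as a flat JSON message. The exact field set varies by Canal version, but the consumer below relies on the type, data, database and table fields. A representative message for an insert into test.users might look like this (the id/name columns are assumed for illustration):
{
  "data": [{"id": "1", "name": "mayikt"}],
  "database": "test",
  "table": "users",
  "type": "INSERT",
  "isDdl": false
}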
Integrate Kafka into a Spring Boot project
Maven dependencies: see the pom at the end of this section.
Sync code
package com.mayikt;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.mayikt.utils.RedisUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * @author 余勝軍
 */
@RestController
@Slf4j
@SpringBootApplication
public class KafkaController {
    /**
     * Inject the KafkaTemplate
     */
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;
    @Autowired
    private RedisUtils redisUtils;

    /**
     * Send a message
     *
     * @param key  key of the record to push
     * @param data payload of the record to push
     */
    private void send(String key, String data) {
        // topic name, record key, record payload
        kafkaTemplate.send("mayikt", key, data);
    }

    @RequestMapping("/kafka")
    public String testKafka() {
        int iMax = 6;
        for (int i = 1; i < iMax; i++) {
            send("key" + i, "data" + i);
        }
        return "success";
    }

    public static void main(String[] args) {
        SpringApplication.run(KafkaController.class, args);
    }

    /**
     * Consume Canal messages and mirror each row change into Redis
     */
    @KafkaListener(topics = "maikt-topic")
    public void receive(ConsumerRecord<?, ?> consumer) {
        log.info("topic:{}, key:{}, partition:{}, offset:{}, value:{}",
                consumer.topic(), consumer.key(), consumer.partition(), consumer.offset(), consumer.value());
        String json = (String) consumer.value();
        JSONObject jsonObject = JSONObject.parseObject(json);
        String sqlType = jsonObject.getString("type");
        JSONArray data = jsonObject.getJSONArray("data");
        if (data == null || data.isEmpty()) {
            return; // DDL and transaction messages carry no row data
        }
        JSONObject userObject = data.getJSONObject(0);
        String id = userObject.getString("id");
        String database = jsonObject.getString("database");
        String table = jsonObject.getString("table");
        String key = database + "_" + table + "_" + id;
        if ("UPDATE".equals(sqlType) || "INSERT".equals(sqlType)) {
            redisUtils.setString(key, userObject.toJSONString());
            return;
        }
        if ("DELETE".equals(sqlType)) {
            redisUtils.deleteKey(key);
        }
    }
}
YML
# kafka
spring:
  kafka:
    # Kafka broker address(es); multiple are allowed
    bootstrap-servers: 127.0.0.1:9092
    consumer:
      # default consumer group id
      group-id: kafka2
      # earliest: if a partition has a committed offset, resume from it; otherwise consume from the beginning
      # latest: if a partition has a committed offset, resume from it; otherwise consume only newly produced records
      # none: if every partition has a committed offset, resume from them; if any partition lacks one, throw an exception
      auto-offset-reset: earliest
      # key/value deserializers
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    producer:
      # key/value serializers
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # producer batch size in bytes
      batch-size: 65536
      # producer buffer memory in bytes
      buffer-memory: 524288
  redis:
    host: 127.0.0.1
    # password:
    port: 6379
    database: 0
pom
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.itmayiedu</groupId>
    <artifactId>springboot2.0_kafka</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.0.1.RELEASE</version>
    </parent>
    <dependencies>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.62</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.18.12</version>
        </dependency>
    </dependencies>
</project>