feat(kv): initialize the KV service module

- Added DTO classes for creating, deleting, and querying note content
- Configured the Cassandra database connection
- Implemented Cassandra-backed storage and querying of note content

feat(kv): initialize the distributed-ID service module

- Implemented the distributed ID generator service (Snowflake and Segment modes)
- Added monitoring endpoints for the ID generator
- Configured MyBatis for database access
- Added the Segment and Snowflake service implementations
- Added the Leaf model classes, allocator interface, and allocator implementations
- Added the Leaf controller and monitoring view
- Added the Leaf exception classes, including the init exception
- Added the Leaf startup class, ID generation interface, and constant definitions
- Added the Leaf annotations, enums, configuration classes, and utility classes
- Added the Leaf configuration files and logging configuration

TODO: consider porting the Leaf code to the Java 21 platform.
This commit is contained in:
Hanserwei
2025-10-06 22:28:27 +08:00
parent 534a49a358
commit 2910fdb54f
63 changed files with 2706 additions and 1 deletion

View File

@@ -0,0 +1,136 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<!-- Parent project -->
<parent>
<groupId>com.hanserwei</groupId>
<artifactId>han-note-distributed-id-generator</artifactId>
<version>${revision}</version>
</parent>
<!-- Packaging type -->
<packaging>jar</packaging>
<artifactId>hannote-distributed-id-generator-biz</artifactId>
<name>${project.artifactId}</name>
<description>Distributed ID generation business layer</description>
<properties>
<common-io.version>2.4</common-io.version>
<perf4j.version>0.9.16</perf4j.version>
<druid.version>1.0.18</druid.version>
<mybatis.version>3.3.0</mybatis.version>
<curator-recipes.version>2.6.0</curator-recipes.version>
<zookeeper.version>3.6.0</zookeeper.version>
</properties>
<dependencies>
<dependency>
<groupId>com.hanserwei</groupId>
<artifactId>hanserwei-common</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-bootstrap</artifactId>
</dependency>
<!-- Service discovery -->
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${common-io.version}</version>
</dependency>
<dependency>
<groupId>org.perf4j</groupId>
<artifactId>perf4j</artifactId>
<version>${perf4j.version}</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.29</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.mybatis</groupId>
<artifactId>mybatis</artifactId>
<version>${mybatis.version}</version>
</dependency>
<!-- zk -->
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator-recipes.version}</version>
<!-- Exclusions to avoid logging conflicts -->
<exclusions>
<exclusion>
<artifactId>log4j</artifactId>
<groupId>log4j</groupId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-reload4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<version>${zookeeper.version}</version>
<!-- Exclusions to avoid logging conflicts -->
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,11 @@
package com.hanserwei.hannote.distributed.id.generator.biz;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class HannoteDistributedIdGeneratorBizApplication {
public static void main(String[] args) {
SpringApplication.run(HannoteDistributedIdGeneratorBizApplication.class, args);
}
}

View File

@@ -0,0 +1,12 @@
package com.hanserwei.hannote.distributed.id.generator.biz.constant;
public class Constants {
public static final String LEAF_SEGMENT_ENABLE = "leaf.segment.enable";
public static final String LEAF_JDBC_URL = "leaf.jdbc.url";
public static final String LEAF_JDBC_USERNAME = "leaf.jdbc.username";
public static final String LEAF_JDBC_PASSWORD = "leaf.jdbc.password";
public static final String LEAF_SNOWFLAKE_ENABLE = "leaf.snowflake.enable";
public static final String LEAF_SNOWFLAKE_PORT = "leaf.snowflake.port";
public static final String LEAF_SNOWFLAKE_ZK_ADDRESS = "leaf.snowflake.zk.address";
}

View File

@@ -0,0 +1,47 @@
package com.hanserwei.hannote.distributed.id.generator.biz.controller;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Status;
import com.hanserwei.hannote.distributed.id.generator.biz.exception.LeafServerException;
import com.hanserwei.hannote.distributed.id.generator.biz.exception.NoKeyException;
import com.hanserwei.hannote.distributed.id.generator.biz.service.SegmentService;
import com.hanserwei.hannote.distributed.id.generator.biz.service.SnowflakeService;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping("/id")
@Slf4j
public class LeafController {
@Resource
private SegmentService segmentService;
@Resource
private SnowflakeService snowflakeService;
@RequestMapping(value = "/segment/get/{key}")
public String getSegmentId(@PathVariable("key") String key) {
return get(key, segmentService.getId(key));
}
@RequestMapping(value = "/snowflake/get/{key}")
public String getSnowflakeId(@PathVariable("key") String key) {
return get(key, snowflakeService.getId(key));
}
private String get(String key, Result result) {
if (key == null || key.isEmpty()) {
throw new NoKeyException();
}
if (result.getStatus().equals(Status.EXCEPTION)) {
throw new LeafServerException(result.toString());
}
return String.valueOf(result.getId());
}
}
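For a quick smoke test of these two endpoints once the service is up, here is a minimal client sketch. Assumptions: the service listens on port 8085 (the value configured in application.yml later in this commit), and `test-tag` is a hypothetical key that, for segment mode, must exist as a biz_tag row in the backing table.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class LeafSmokeTest {
public static void main(String[] args) throws Exception {
HttpClient client = HttpClient.newHttpClient();
// Segment mode: the key must match a biz_tag row in the backing table
HttpRequest segment = HttpRequest.newBuilder(URI.create("http://127.0.0.1:8085/id/segment/get/test-tag")).build();
System.out.println("segment id: " + client.send(segment, HttpResponse.BodyHandlers.ofString()).body());
// Snowflake mode: the key is accepted but not used by the generator
HttpRequest snowflake = HttpRequest.newBuilder(URI.create("http://127.0.0.1:8085/id/snowflake/get/test-tag")).build();
System.out.println("snowflake id: " + client.send(snowflake, HttpResponse.BodyHandlers.ofString()).body());
}
}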

View File

@@ -0,0 +1,104 @@
package com.hanserwei.hannote.distributed.id.generator.biz.controller;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.SegmentIDGenImpl;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.LeafAlloc;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.SegmentBuffer;
import com.hanserwei.hannote.distributed.id.generator.biz.model.SegmentBufferView;
import com.hanserwei.hannote.distributed.id.generator.biz.service.SegmentService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Controller
public class LeafMonitorController {
private Logger logger = LoggerFactory.getLogger(LeafMonitorController.class);
@Autowired
private SegmentService segmentService;
@RequestMapping(value = "cache")
public String getCache(Model model) {
Map<String, SegmentBufferView> data = new HashMap<>();
SegmentIDGenImpl segmentIDGen = segmentService.getIdGen();
if (segmentIDGen == null) {
throw new IllegalArgumentException("You should set leaf.segment.enable=true first");
}
Map<String, SegmentBuffer> cache = segmentIDGen.getCache();
for (Map.Entry<String, SegmentBuffer> entry : cache.entrySet()) {
SegmentBufferView sv = new SegmentBufferView();
SegmentBuffer buffer = entry.getValue();
sv.setInitOk(buffer.isInitOk());
sv.setKey(buffer.getKey());
sv.setPos(buffer.getCurrentPos());
sv.setNextReady(buffer.isNextReady());
sv.setMax0(buffer.getSegments()[0].getMax());
sv.setValue0(buffer.getSegments()[0].getValue().get());
sv.setStep0(buffer.getSegments()[0].getStep());
sv.setMax1(buffer.getSegments()[1].getMax());
sv.setValue1(buffer.getSegments()[1].getValue().get());
sv.setStep1(buffer.getSegments()[1].getStep());
data.put(entry.getKey(), sv);
}
logger.info("Cache info {}", data);
model.addAttribute("data", data);
return "segment";
}
@RequestMapping(value = "db")
public String getDb(Model model) {
SegmentIDGenImpl segmentIDGen = segmentService.getIdGen();
if (segmentIDGen == null) {
throw new IllegalArgumentException("You should set leaf.segment.enable=true first");
}
List<LeafAlloc> items = segmentIDGen.getAllLeafAllocs();
logger.info("DB info {}", items);
model.addAttribute("items", items);
return "db";
}
/**
* the output is like this:
* {
* "timestamp": "1567733700834(2019-09-06 09:35:00.834)",
* "sequenceId": "3448",
* "workerId": "39"
* }
*/
@RequestMapping(value = "decodeSnowflakeId")
@ResponseBody
public Map<String, String> decodeSnowflakeId(@RequestParam("snowflakeId") String snowflakeIdStr) {
Map<String, String> map = new HashMap<>();
try {
long snowflakeId = Long.parseLong(snowflakeIdStr);
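// Assumed bit layout, matching SnowflakeIDGenImpl below: sign bit unused,
// 41-bit timestamp (ms since the twepoch 1288834974657L) << 22,
// 10-bit workerId << 12, and a 12-bit sequence in the low bits.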
long originTimestamp = (snowflakeId >> 22) + 1288834974657L;
Date date = new Date(originTimestamp);
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
map.put("timestamp", String.valueOf(originTimestamp) + "(" + sdf.format(date) + ")");
long workerId = (snowflakeId >> 12) ^ (snowflakeId >> 22 << 10);
map.put("workerId", String.valueOf(workerId));
long sequence = snowflakeId ^ (snowflakeId >> 12 << 12);
map.put("sequenceId", String.valueOf(sequence));
} catch (NumberFormatException e) {
map.put("errorMsg", "snowflake Id反解析发生异常!");
}
return map;
}
}

View File

@@ -0,0 +1,9 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
public interface IDGen {
Result get(String key);
boolean init();
}

View File

@@ -0,0 +1,27 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
public class CheckVO {
private long timestamp;
private int workID;
public CheckVO(long timestamp, int workID) {
this.timestamp = timestamp;
this.workID = workID;
}
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
public int getWorkID() {
return workID;
}
public void setWorkID(int workID) {
this.workID = workID;
}
}

View File

@@ -0,0 +1,22 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Properties;
public class PropertyFactory {
private static final Logger logger = LoggerFactory.getLogger(PropertyFactory.class);
private static final Properties prop = new Properties();
static {
try {
prop.load(PropertyFactory.class.getClassLoader().getResourceAsStream("leaf.properties"));
} catch (IOException e) {
logger.warn("Load Properties Ex", e);
}
}
public static Properties getProperties() {
return prop;
}
}

View File

@@ -0,0 +1,39 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
public class Result {
private long id;
private Status status;
public Result() {
}
public Result(long id, Status status) {
this.id = id;
this.status = status;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Status getStatus() {
return status;
}
public void setStatus(Status status) {
this.status = status;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Result{");
sb.append("id=").append(id);
sb.append(", status=").append(status);
sb.append('}');
return sb.toString();
}
}

View File

@@ -0,0 +1,6 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
public enum Status {
SUCCESS,
EXCEPTION
}

View File

@@ -0,0 +1,75 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
public class Utils {
private static final Logger logger = LoggerFactory.getLogger(Utils.class);
public static String getIp() {
String ip;
try {
List<String> ipList = getHostAddress(null);
// default the first
ip = (!ipList.isEmpty()) ? ipList.get(0) : "";
} catch (Exception ex) {
ip = "";
logger.warn("Utils get IP warn", ex);
}
return ip;
}
public static String getIp(String interfaceName) {
String ip;
interfaceName = interfaceName.trim();
try {
List<String> ipList = getHostAddress(interfaceName);
ip = (!ipList.isEmpty()) ? ipList.get(0) : "";
} catch (Exception ex) {
ip = "";
logger.warn("Utils get IP warn", ex);
}
return ip;
}
/**
* Get the IP addresses of active network interfaces.
*
* @param interfaceName interface name to filter by; null returns addresses from all interfaces
* @return List<String>
*/
private static List<String> getHostAddress(String interfaceName) throws SocketException {
List<String> ipList = new ArrayList<String>(5);
Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
while (interfaces.hasMoreElements()) {
NetworkInterface ni = interfaces.nextElement();
Enumeration<InetAddress> allAddress = ni.getInetAddresses();
while (allAddress.hasMoreElements()) {
InetAddress address = allAddress.nextElement();
if (address.isLoopbackAddress()) {
// skip the loopback addr
continue;
}
if (address instanceof Inet6Address) {
// skip the IPv6 addr
continue;
}
String hostAddress = address.getHostAddress();
if (null == interfaceName) {
ipList.add(hostAddress);
} else if (interfaceName.equals(ni.getDisplayName())) {
ipList.add(hostAddress);
}
}
}
return ipList;
}
}

View File

@@ -0,0 +1,16 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.common;
import com.hanserwei.hannote.distributed.id.generator.biz.core.IDGen;
public class ZeroIDGen implements IDGen {
@Override
public Result get(String key) {
return new Result(0, Status.SUCCESS);
}
@Override
public boolean init() {
return true;
}
}

View File

@@ -0,0 +1,294 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment;
import com.hanserwei.hannote.distributed.id.generator.biz.core.IDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Status;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocDao;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.LeafAlloc;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.Segment;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.SegmentBuffer;
import org.perf4j.StopWatch;
import org.perf4j.slf4j.Slf4JStopWatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
public class SegmentIDGenImpl implements IDGen {
private static final Logger logger = LoggerFactory.getLogger(SegmentIDGenImpl.class);
/**
* Error code returned when the ID cache has not finished initializing
*/
private static final long EXCEPTION_ID_IDCACHE_INIT_FALSE = -1;
/**
* Error code returned when the key does not exist
*/
private static final long EXCEPTION_ID_KEY_NOT_EXISTS = -2;
/**
* Error code returned when neither segment in the SegmentBuffer has been loaded from the DB
*/
private static final long EXCEPTION_ID_TWO_SEGMENTS_ARE_NULL = -3;
/**
* Maximum step size, capped at 1,000,000
*/
private static final int MAX_STEP = 1000000;
/**
* Target lifetime of a single segment: 15 minutes
*/
private static final long SEGMENT_DURATION = 15 * 60 * 1000L;
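// SynchronousQueue with an unbounded max pool size: each segment-refresh task is handed
// directly to a thread (a new one if necessary) instead of waiting in a queue.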
private ExecutorService service = new ThreadPoolExecutor(5, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new UpdateThreadFactory());
private volatile boolean initOK = false;
private Map<String, SegmentBuffer> cache = new ConcurrentHashMap<String, SegmentBuffer>();
private IDAllocDao dao;
public static class UpdateThreadFactory implements ThreadFactory {
private static int threadInitNumber = 0;
private static synchronized int nextThreadNum() {
return threadInitNumber++;
}
@Override
public Thread newThread(Runnable r) {
return new Thread(r, "Thread-Segment-Update-" + nextThreadNum());
}
}
@Override
public boolean init() {
logger.info("Init ...");
// make sure tags are loaded from the DB before init is considered successful
updateCacheFromDb();
initOK = true;
updateCacheFromDbAtEveryMinute();
return initOK;
}
private void updateCacheFromDbAtEveryMinute() {
ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(r);
t.setName("check-idCache-thread");
t.setDaemon(true);
return t;
}
});
service.scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
updateCacheFromDb();
}
}, 60, 60, TimeUnit.SECONDS);
}
private void updateCacheFromDb() {
logger.info("update cache from db");
StopWatch sw = new Slf4JStopWatch();
try {
List<String> dbTags = dao.getAllTags();
if (dbTags == null || dbTags.isEmpty()) {
return;
}
List<String> cacheTags = new ArrayList<String>(cache.keySet());
Set<String> insertTagsSet = new HashSet<>(dbTags);
Set<String> removeTagsSet = new HashSet<>(cacheTags);
// pour tags newly added in the DB into the cache
insertTagsSet.removeAll(cacheTags);
for (String tag : insertTagsSet) {
SegmentBuffer buffer = new SegmentBuffer();
buffer.setKey(tag);
Segment segment = buffer.getCurrent();
segment.setValue(new AtomicLong(0));
segment.setMax(0);
segment.setStep(0);
cache.put(tag, buffer);
logger.info("Add tag {} from db to IdCache, SegmentBuffer {}", tag, buffer);
}
// evict tags that no longer exist in the DB from the cache
removeTagsSet.removeAll(dbTags);
for (String tag : removeTagsSet) {
cache.remove(tag);
logger.info("Remove tag {} from IdCache", tag);
}
} catch (Exception e) {
logger.warn("update cache from db exception", e);
} finally {
sw.stop("updateCacheFromDb");
}
}
@Override
public Result get(final String key) {
if (!initOK) {
return new Result(EXCEPTION_ID_IDCACHE_INIT_FALSE, Status.EXCEPTION);
}
if (cache.containsKey(key)) {
SegmentBuffer buffer = cache.get(key);
if (!buffer.isInitOk()) {
synchronized (buffer) {
if (!buffer.isInitOk()) {
try {
updateSegmentFromDb(key, buffer.getCurrent());
logger.info("Init buffer. Update leafkey {} {} from db", key, buffer.getCurrent());
buffer.setInitOk(true);
} catch (Exception e) {
logger.warn("Init buffer {} exception", buffer.getCurrent(), e);
}
}
}
}
return getIdFromSegmentBuffer(cache.get(key));
}
return new Result(EXCEPTION_ID_KEY_NOT_EXISTS, Status.EXCEPTION);
}
public void updateSegmentFromDb(String key, Segment segment) {
StopWatch sw = new Slf4JStopWatch();
SegmentBuffer buffer = segment.getBuffer();
LeafAlloc leafAlloc;
if (!buffer.isInitOk()) {
leafAlloc = dao.updateMaxIdAndGetLeafAlloc(key);
buffer.setStep(leafAlloc.getStep());
buffer.setMinStep(leafAlloc.getStep()); // leafAlloc.step is the step currently stored in the DB
} else if (buffer.getUpdateTimestamp() == 0) {
leafAlloc = dao.updateMaxIdAndGetLeafAlloc(key);
buffer.setUpdateTimestamp(System.currentTimeMillis());
buffer.setStep(leafAlloc.getStep());
buffer.setMinStep(leafAlloc.getStep()); // leafAlloc.step is the step currently stored in the DB
} else {
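// Dynamic step tuning: consumed in under 15 min -> double the step (capped at MAX_STEP);
// 15-30 min -> keep it; over 30 min -> halve it, but never below the DB-configured minStep.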
long duration = System.currentTimeMillis() - buffer.getUpdateTimestamp();
int nextStep = buffer.getStep();
if (duration < SEGMENT_DURATION) {
if (nextStep * 2 <= MAX_STEP) {
nextStep = nextStep * 2;
}
} else if (duration < SEGMENT_DURATION * 2) {
//do nothing with nextStep
} else {
nextStep = nextStep / 2 >= buffer.getMinStep() ? nextStep / 2 : nextStep;
}
logger.info("leafKey[{}], step[{}], duration[{}mins], nextStep[{}]", key, buffer.getStep(), String.format("%.2f",((double)duration / (1000 * 60))), nextStep);
LeafAlloc temp = new LeafAlloc();
temp.setKey(key);
temp.setStep(nextStep);
leafAlloc = dao.updateMaxIdByCustomStepAndGetLeafAlloc(temp);
buffer.setUpdateTimestamp(System.currentTimeMillis());
buffer.setStep(nextStep);
buffer.setMinStep(leafAlloc.getStep()); // leafAlloc.step is the step currently stored in the DB
}
// must set value before set max
long value = leafAlloc.getMaxId() - buffer.getStep();
segment.getValue().set(value);
segment.setMax(leafAlloc.getMaxId());
segment.setStep(buffer.getStep());
sw.stop("updateSegmentFromDb", key + " " + segment);
}
public Result getIdFromSegmentBuffer(final SegmentBuffer buffer) {
while (true) {
buffer.rLock().lock();
try {
final Segment segment = buffer.getCurrent();
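// Kick off an async refresh of the next segment once the current one is more than
// 10% consumed (idle < 90% of step) and no refresh thread is already running.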
if (!buffer.isNextReady() && (segment.getIdle() < 0.9 * segment.getStep()) && buffer.getThreadRunning().compareAndSet(false, true)) {
service.execute(new Runnable() {
@Override
public void run() {
Segment next = buffer.getSegments()[buffer.nextPos()];
boolean updateOk = false;
try {
updateSegmentFromDb(buffer.getKey(), next);
updateOk = true;
logger.info("update segment {} from db {}", buffer.getKey(), next);
} catch (Exception e) {
logger.warn(buffer.getKey() + " updateSegmentFromDb exception", e);
} finally {
if (updateOk) {
buffer.wLock().lock();
buffer.setNextReady(true);
buffer.getThreadRunning().set(false);
buffer.wLock().unlock();
} else {
buffer.getThreadRunning().set(false);
}
}
}
});
}
long value = segment.getValue().getAndIncrement();
if (value < segment.getMax()) {
return new Result(value, Status.SUCCESS);
}
} finally {
buffer.rLock().unlock();
}
waitAndSleep(buffer);
buffer.wLock().lock();
try {
final Segment segment = buffer.getCurrent();
long value = segment.getValue().getAndIncrement();
if (value < segment.getMax()) {
return new Result(value, Status.SUCCESS);
}
if (buffer.isNextReady()) {
buffer.switchPos();
buffer.setNextReady(false);
} else {
logger.error("Both two segments in {} are not ready!", buffer);
return new Result(EXCEPTION_ID_TWO_SEGMENTS_ARE_NULL, Status.EXCEPTION);
}
} finally {
buffer.wLock().unlock();
}
}
}
private void waitAndSleep(SegmentBuffer buffer) {
int roll = 0;
while (buffer.getThreadRunning().get()) {
roll += 1;
if(roll > 10000) {
try {
TimeUnit.MILLISECONDS.sleep(10);
break;
} catch (InterruptedException e) {
logger.warn("Thread {} Interrupted", Thread.currentThread().getName());
Thread.currentThread().interrupt(); // restore the interrupt flag
break;
}
}
}
}
public List<LeafAlloc> getAllLeafAllocs() {
return dao.getAllLeafAllocs();
}
public Map<String, SegmentBuffer> getCache() {
return cache;
}
public IDAllocDao getDao() {
return dao;
}
public void setDao(IDAllocDao dao) {
this.dao = dao;
}
}

View File

@@ -0,0 +1,13 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.LeafAlloc;
import java.util.List;
public interface IDAllocDao {
List<LeafAlloc> getAllLeafAllocs();
LeafAlloc updateMaxIdAndGetLeafAlloc(String tag);
LeafAlloc updateMaxIdByCustomStepAndGetLeafAlloc(LeafAlloc leafAlloc);
List<String> getAllTags();
}

View File

@@ -0,0 +1,35 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.LeafAlloc;
import org.apache.ibatis.annotations.*;
import java.util.List;
public interface IDAllocMapper {
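// Assumed backing table, inferred from the statements below (the DDL itself is not part of this commit):
// leaf_alloc(biz_tag VARCHAR PRIMARY KEY, max_id BIGINT, step INT, update_time TIMESTAMP)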
@Select("SELECT biz_tag, max_id, step, update_time FROM leaf_alloc")
@Results(value = {
@Result(column = "biz_tag", property = "key"),
@Result(column = "max_id", property = "maxId"),
@Result(column = "step", property = "step"),
@Result(column = "update_time", property = "updateTime")
})
List<LeafAlloc> getAllLeafAllocs();
@Select("SELECT biz_tag, max_id, step FROM leaf_alloc WHERE biz_tag = #{tag}")
@Results(value = {
@Result(column = "biz_tag", property = "key"),
@Result(column = "max_id", property = "maxId"),
@Result(column = "step", property = "step")
})
LeafAlloc getLeafAlloc(@Param("tag") String tag);
@Update("UPDATE leaf_alloc SET max_id = max_id + step WHERE biz_tag = #{tag}")
void updateMaxId(@Param("tag") String tag);
@Update("UPDATE leaf_alloc SET max_id = max_id + #{step} WHERE biz_tag = #{key}")
void updateMaxIdByCustomStep(@Param("leafAlloc") LeafAlloc leafAlloc);
@Select("SELECT biz_tag FROM leaf_alloc")
List<String> getAllTags();
}

View File

@@ -0,0 +1,75 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.impl;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocDao;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model.LeafAlloc;
import org.apache.ibatis.mapping.Environment;
import org.apache.ibatis.session.Configuration;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.session.SqlSessionFactoryBuilder;
import org.apache.ibatis.transaction.TransactionFactory;
import org.apache.ibatis.transaction.jdbc.JdbcTransactionFactory;
import javax.sql.DataSource;
import java.util.List;
public class IDAllocDaoImpl implements IDAllocDao {
SqlSessionFactory sqlSessionFactory;
public IDAllocDaoImpl(DataSource dataSource) {
TransactionFactory transactionFactory = new JdbcTransactionFactory();
Environment environment = new Environment("development", transactionFactory, dataSource);
Configuration configuration = new Configuration(environment);
configuration.addMapper(IDAllocMapper.class);
sqlSessionFactory = new SqlSessionFactoryBuilder().build(configuration);
}
@Override
public List<LeafAlloc> getAllLeafAllocs() {
SqlSession sqlSession = sqlSessionFactory.openSession(false);
try {
return sqlSession.selectList("com.hanserwei.hannote.segment.dao.IDAllocMapper.getAllLeafAllocs");
} finally {
sqlSession.close();
}
}
@Override
public LeafAlloc updateMaxIdAndGetLeafAlloc(String tag) {
SqlSession sqlSession = sqlSessionFactory.openSession();
try {
sqlSession.update("com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper.updateMaxId", tag);
LeafAlloc result = sqlSession.selectOne("com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper.getLeafAlloc", tag);
sqlSession.commit();
return result;
} finally {
sqlSession.close();
}
}
@Override
public LeafAlloc updateMaxIdByCustomStepAndGetLeafAlloc(LeafAlloc leafAlloc) {
SqlSession sqlSession = sqlSessionFactory.openSession();
try {
sqlSession.update("com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper.updateMaxIdByCustomStep", leafAlloc);
LeafAlloc result = sqlSession.selectOne("com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper.getLeafAlloc", leafAlloc.getKey());
sqlSession.commit();
return result;
} finally {
sqlSession.close();
}
}
@Override
public List<String> getAllTags() {
SqlSession sqlSession = sqlSessionFactory.openSession(false);
try {
return sqlSession.selectList("com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocMapper.getAllTags");
} finally {
sqlSession.close();
}
}
}

View File

@@ -0,0 +1,40 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model;
public class LeafAlloc {
private String key;
private long maxId;
private int step;
private String updateTime;
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public long getMaxId() {
return maxId;
}
public void setMaxId(long maxId) {
this.maxId = maxId;
}
public int getStep() {
return step;
}
public void setStep(int step) {
this.step = step;
}
public String getUpdateTime() {
return updateTime;
}
public void setUpdateTime(String updateTime) {
this.updateTime = updateTime;
}
}

View File

@@ -0,0 +1,59 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model;
import java.util.concurrent.atomic.AtomicLong;
public class Segment {
private AtomicLong value = new AtomicLong(0);
private volatile long max;
private volatile int step;
private SegmentBuffer buffer;
public Segment(SegmentBuffer buffer) {
this.buffer = buffer;
}
public AtomicLong getValue() {
return value;
}
public void setValue(AtomicLong value) {
this.value = value;
}
public long getMax() {
return max;
}
public void setMax(long max) {
this.max = max;
}
public int getStep() {
return step;
}
public void setStep(int step) {
this.step = step;
}
public SegmentBuffer getBuffer() {
return buffer;
}
public long getIdle() {
return this.getMax() - getValue().get();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Segment(");
sb.append("value:");
sb.append(value);
sb.append(",max:");
sb.append(max);
sb.append(",step:");
sb.append(step);
sb.append(")");
return sb.toString();
}
}

View File

@@ -0,0 +1,129 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.segment.model;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Double buffer of segments
*/
public class SegmentBuffer {
private String key;
private Segment[] segments; // double buffer
private volatile int currentPos; // index of the segment currently in use
private volatile boolean nextReady; // whether the next segment is ready to switch to
private volatile boolean initOk; // whether initialization has completed
private final AtomicBoolean threadRunning; // whether an update thread is running
private final ReadWriteLock lock;
private volatile int step;
private volatile int minStep;
private volatile long updateTimestamp;
public SegmentBuffer() {
segments = new Segment[]{new Segment(this), new Segment(this)};
currentPos = 0;
nextReady = false;
initOk = false;
threadRunning = new AtomicBoolean(false);
lock = new ReentrantReadWriteLock();
}
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public Segment[] getSegments() {
return segments;
}
public Segment getCurrent() {
return segments[currentPos];
}
public int getCurrentPos() {
return currentPos;
}
public int nextPos() {
return (currentPos + 1) % 2;
}
public void switchPos() {
currentPos = nextPos();
}
public boolean isInitOk() {
return initOk;
}
public void setInitOk(boolean initOk) {
this.initOk = initOk;
}
public boolean isNextReady() {
return nextReady;
}
public void setNextReady(boolean nextReady) {
this.nextReady = nextReady;
}
public AtomicBoolean getThreadRunning() {
return threadRunning;
}
public Lock rLock() {
return lock.readLock();
}
public Lock wLock() {
return lock.writeLock();
}
public int getStep() {
return step;
}
public void setStep(int step) {
this.step = step;
}
public int getMinStep() {
return minStep;
}
public void setMinStep(int minStep) {
this.minStep = minStep;
}
public long getUpdateTimestamp() {
return updateTimestamp;
}
public void setUpdateTimestamp(long updateTimestamp) {
this.updateTimestamp = updateTimestamp;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("SegmentBuffer{");
sb.append("key='").append(key).append('\'');
sb.append(", segments=").append(Arrays.toString(segments));
sb.append(", currentPos=").append(currentPos);
sb.append(", nextReady=").append(nextReady);
sb.append(", initOk=").append(initOk);
sb.append(", threadRunning=").append(threadRunning);
sb.append(", step=").append(step);
sb.append(", minStep=").append(minStep);
sb.append(", updateTimestamp=").append(updateTimestamp);
sb.append('}');
return sb.toString();
}
}

View File

@@ -0,0 +1,113 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake;
import com.google.common.base.Preconditions;
import com.hanserwei.hannote.distributed.id.generator.biz.core.IDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Status;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Random;
public class SnowflakeIDGenImpl implements IDGen {
@Override
public boolean init() {
return true;
}
private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeIDGenImpl.class);
private final long twepoch;
private final long workerIdBits = 10L;
private final long maxWorkerId = ~(-1L << workerIdBits); // maximum assignable workerId = 1023
private final long sequenceBits = 12L;
private final long workerIdShift = sequenceBits;
private final long timestampLeftShift = sequenceBits + workerIdBits;
private final long sequenceMask = ~(-1L << sequenceBits);
private long workerId;
private long sequence = 0L;
private long lastTimestamp = -1L;
private static final Random RANDOM = new Random();
public SnowflakeIDGenImpl(String zkAddress, int port) {
// Thu Nov 04 2010 09:42:54 GMT+0800 (China Standard Time)
this(zkAddress, port, 1288834974657L);
}
/**
* @param zkAddress ZooKeeper address
* @param port port the snowflake node listens on
* @param twepoch custom epoch (start timestamp) in milliseconds
*/
public SnowflakeIDGenImpl(String zkAddress, int port, long twepoch) {
this.twepoch = twepoch;
Preconditions.checkArgument(timeGen() > twepoch, "Snowflake not support twepoch gt currentTime");
final String ip = Utils.getIp();
SnowflakeZookeeperHolder holder = new SnowflakeZookeeperHolder(ip, String.valueOf(port), zkAddress);
LOGGER.info("twepoch:{} ,ip:{} ,zkAddress:{} port:{}", twepoch, ip, zkAddress, port);
boolean initFlag = holder.init();
if (initFlag) {
workerId = holder.getWorkerID();
LOGGER.info("START SUCCESS USE ZK WORKERID-{}", workerId);
} else {
Preconditions.checkArgument(initFlag, "Snowflake Id Gen is not init ok");
}
Preconditions.checkArgument(workerId >= 0 && workerId <= maxWorkerId, "workerID must gte 0 and lte 1023");
}
@Override
public synchronized Result get(String key) {
long timestamp = timeGen();
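// Clock moved backwards: for a small drift (<= 5 ms), wait up to twice the offset
// and retry once; for a larger drift, fail fast with an error result.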
if (timestamp < lastTimestamp) {
long offset = lastTimestamp - timestamp;
if (offset <= 5) {
try {
wait(offset << 1);
timestamp = timeGen();
if (timestamp < lastTimestamp) {
return new Result(-1, Status.EXCEPTION);
}
} catch (InterruptedException e) {
LOGGER.error("wait interrupted");
return new Result(-2, Status.EXCEPTION);
}
} else {
return new Result(-3, Status.EXCEPTION);
}
}
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
if (sequence == 0) {
// sequence overflowed in this millisecond; start the next millisecond with a small random sequence
sequence = RANDOM.nextInt(100);
timestamp = tilNextMillis(lastTimestamp);
}
} else {
// a new millisecond has started; randomize the starting sequence
sequence = RANDOM.nextInt(100);
}
lastTimestamp = timestamp;
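// Compose the ID: (41-bit timestamp delta << 22) | (10-bit workerId << 12) | 12-bit sequence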
long id = ((timestamp - twepoch) << timestampLeftShift) | (workerId << workerIdShift) | sequence;
return new Result(id, Status.SUCCESS);
}
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
protected long timeGen() {
return System.currentTimeMillis();
}
public long getWorkerId() {
return workerId;
}
}

View File

@@ -0,0 +1,292 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.Maps;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.PropertyFactory;
import com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake.exception.CheckLastTimeException;
import org.apache.commons.io.FileUtils;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryUntilElapsed;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
public class SnowflakeZookeeperHolder {
private static final Logger LOGGER = LoggerFactory.getLogger(SnowflakeZookeeperHolder.class);
private String zk_AddressNode = null; // this node's own ZK key: ip:port-000000001
private String listenAddress = null; // this node's own key: ip:port
private int workerID;
private static final String PREFIX_ZK_PATH = "/snowflake/" + PropertyFactory.getProperties().getProperty("leaf.name");
private static final String PROP_PATH = System.getProperty("java.io.tmpdir") + File.separator + PropertyFactory.getProperties().getProperty("leaf.name") + "/leafconf/{port}/workerID.properties";
private static final String PATH_FOREVER = PREFIX_ZK_PATH + "/forever"; // persistent node that stores all registered nodes
private String ip;
private String port;
private String connectionString;
private long lastUpdateTime;
public SnowflakeZookeeperHolder(String ip, String port, String connectionString) {
this.ip = ip;
this.port = port;
this.listenAddress = ip + ":" + port;
this.connectionString = connectionString;
}
public boolean init() {
try {
CuratorFramework curator = createWithOptions(connectionString, new RetryUntilElapsed(1000, 4), 10000, 6000);
curator.start();
Stat stat = curator.checkExists().forPath(PATH_FOREVER);
if (stat == null) {
// root node missing: first startup on this machine; create /snowflake/ip:port-000000000 and upload data
zk_AddressNode = createNode(curator);
// worker id defaults to 0
updateLocalWorkerID(workerID);
// periodically report this machine's time to the forever node
scheduledUploadData(curator, zk_AddressNode);
return true;
} else {
Map<String, Integer> nodeMap = Maps.newHashMap(); // ip:port -> 00001
Map<String, String> realNode = Maps.newHashMap(); // ip:port -> (ipport-000001)
// root node exists: first check whether this node already has its own child node
List<String> keys = curator.getChildren().forPath(PATH_FOREVER);
for (String key : keys) {
String[] nodeKey = key.split("-");
realNode.put(nodeKey[0], key);
nodeMap.put(nodeKey[0], Integer.parseInt(nodeKey[1]));
}
Integer workerid = nodeMap.get(listenAddress);
if (workerid != null) {
// this node already has a child under the forever node; reuse it
zk_AddressNode = PATH_FOREVER + "/" + realNode.get(listenAddress);
workerID = workerid; // reused when the worker starts
if (!checkInitTimeStamp(curator, zk_AddressNode)) {
throw new CheckLastTimeException("init timestamp check error,forever node timestamp gt this node time");
}
// schedule the periodic time upload
doService(curator);
updateLocalWorkerID(workerID);
LOGGER.info("[Old NODE]find forever node have this endpoint ip-{} port-{} workid-{} childnode and start SUCCESS", ip, port, workerID);
} else {
// newly started node: create a persistent sequential node; no timestamp check needed
String newNode = createNode(curator);
zk_AddressNode = newNode;
String[] nodeKey = newNode.split("-");
workerID = Integer.parseInt(nodeKey[1]);
doService(curator);
updateLocalWorkerID(workerID);
LOGGER.info("[New NODE]can not find node on forever node that endpoint ip-{} port-{} workid-{},create own node on forever node and start SUCCESS ", ip, port, workerID);
}
}
} catch (Exception e) {
LOGGER.error("Start node ERROR {}", e);
try {
Properties properties = new Properties();
properties.load(new FileInputStream(new File(PROP_PATH.replace("{port}", port + ""))));
workerID = Integer.valueOf(properties.getProperty("workerID"));
LOGGER.warn("START FAILED ,use local node file properties workerID-{}", workerID);
} catch (Exception e1) {
LOGGER.error("Read file error ", e1);
return false;
}
}
return true;
}
private void doService(CuratorFramework curator) {
scheduledUploadData(curator, zk_AddressNode); // /snowflake_forever/ip:port-000000001
}
private void scheduledUploadData(final CuratorFramework curator, final String zk_AddressNode) {
Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r, "schedule-upload-time");
thread.setDaemon(true);
return thread;
}
}).scheduleWithFixedDelay(new Runnable() {
@Override
public void run() {
updateNewData(curator, zk_AddressNode);
}
}, 1L, 3L, TimeUnit.SECONDS); // report data every 3 seconds
}
private boolean checkInitTimeStamp(CuratorFramework curator, String zk_AddressNode) throws Exception {
byte[] bytes = curator.getData().forPath(zk_AddressNode);
Endpoint endPoint = deBuildData(new String(bytes));
// this node's time must not be earlier than the last reported timestamp
return !(endPoint.getTimestamp() > System.currentTimeMillis());
}
/**
* Create a persistent sequential node and store the node data as its value.
*/
private String createNode(CuratorFramework curator) throws Exception {
try {
return curator.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath(PATH_FOREVER + "/" + listenAddress + "-", buildData().getBytes());
} catch (Exception e) {
LOGGER.error("create node error msg {} ", e.getMessage());
throw e;
}
}
private void updateNewData(CuratorFramework curator, String path) {
try {
if (System.currentTimeMillis() < lastUpdateTime) {
return;
}
curator.setData().forPath(path, buildData().getBytes());
lastUpdateTime = System.currentTimeMillis();
} catch (Exception e) {
LOGGER.info("update init data error path is {} error is {}", path, e);
}
}
/**
* Build the data to upload.
*/
private String buildData() throws JsonProcessingException {
Endpoint endpoint = new Endpoint(ip, port, System.currentTimeMillis());
ObjectMapper mapper = new ObjectMapper();
String json = mapper.writeValueAsString(endpoint);
return json;
}
private Endpoint deBuildData(String json) throws IOException {
ObjectMapper mapper = new ObjectMapper();
Endpoint endpoint = mapper.readValue(json, Endpoint.class);
return endpoint;
}
/**
* Cache the workerID on the local filesystem so the node can still start normally
* after a machine restart even if ZooKeeper is unavailable.
*/
private void updateLocalWorkerID(int workerID) {
File leafConfFile = new File(PROP_PATH.replace("{port}", port));
boolean exists = leafConfFile.exists();
LOGGER.info("file exists status is {}", exists);
if (exists) {
try {
FileUtils.writeStringToFile(leafConfFile, "workerID=" + workerID, false);
LOGGER.info("update file cache workerID is {}", workerID);
} catch (IOException e) {
LOGGER.error("update file cache error ", e);
}
} else {
// the file does not exist, so the parent directory is missing as well
try {
boolean mkdirs = leafConfFile.getParentFile().mkdirs();
LOGGER.info("init local file cache create parent dis status is {}, worker id is {}", mkdirs, workerID);
if (mkdirs) {
if (leafConfFile.createNewFile()) {
FileUtils.writeStringToFile(leafConfFile, "workerID=" + workerID, false);
LOGGER.info("local file cache workerID is {}", workerID);
}
} else {
LOGGER.warn("create parent dir error===");
}
} catch (IOException e) {
LOGGER.warn("craete workerID conf file error", e);
}
}
}
private CuratorFramework createWithOptions(String connectionString, RetryPolicy retryPolicy, int connectionTimeoutMs, int sessionTimeoutMs) {
return CuratorFrameworkFactory.builder().connectString(connectionString)
.retryPolicy(retryPolicy)
.connectionTimeoutMs(connectionTimeoutMs)
.sessionTimeoutMs(sessionTimeoutMs)
.build();
}
/**
* Data structure reported to ZooKeeper
*/
static class Endpoint {
private String ip;
private String port;
private long timestamp;
public Endpoint() {
}
public Endpoint(String ip, String port, long timestamp) {
this.ip = ip;
this.port = port;
this.timestamp = timestamp;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
public String getPort() {
return port;
}
public void setPort(String port) {
this.port = port;
}
public long getTimestamp() {
return timestamp;
}
public void setTimestamp(long timestamp) {
this.timestamp = timestamp;
}
}
public String getZk_AddressNode() {
return zk_AddressNode;
}
public void setZk_AddressNode(String zk_AddressNode) {
this.zk_AddressNode = zk_AddressNode;
}
public String getListenAddress() {
return listenAddress;
}
public void setListenAddress(String listenAddress) {
this.listenAddress = listenAddress;
}
public int getWorkerID() {
return workerID;
}
public void setWorkerID(int workerID) {
this.workerID = workerID;
}
}

View File

@@ -0,0 +1,7 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake.exception;
public class CheckLastTimeException extends RuntimeException {
public CheckLastTimeException(String msg){
super(msg);
}
}

View File

@@ -0,0 +1,7 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake.exception;
public class CheckOtherNodeException extends RuntimeException {
public CheckOtherNodeException(String message) {
super(message);
}
}

View File

@@ -0,0 +1,7 @@
package com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake.exception;
public class ClockGoBackException extends RuntimeException {
public ClockGoBackException(String message) {
super(message);
}
}

View File

@@ -0,0 +1,7 @@
package com.hanserwei.hannote.distributed.id.generator.biz.exception;
public class InitException extends Exception{
public InitException(String msg) {
super(msg);
}
}

View File

@@ -0,0 +1,11 @@
package com.hanserwei.hannote.distributed.id.generator.biz.exception;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
@ResponseStatus(code=HttpStatus.INTERNAL_SERVER_ERROR)
public class LeafServerException extends RuntimeException {
public LeafServerException(String msg) {
super(msg);
}
}

View File

@@ -0,0 +1,8 @@
package com.hanserwei.hannote.distributed.id.generator.biz.exception;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
@ResponseStatus(code=HttpStatus.INTERNAL_SERVER_ERROR,reason="Key is none")
public class NoKeyException extends RuntimeException {
}

View File

@@ -0,0 +1,95 @@
package com.hanserwei.hannote.distributed.id.generator.biz.model;
public class SegmentBufferView {
private String key;
private long value0;
private int step0;
private long max0;
private long value1;
private int step1;
private long max1;
private int pos;
private boolean nextReady;
private boolean initOk;
public String getKey() {
return key;
}
public void setKey(String key) {
this.key = key;
}
public long getValue1() {
return value1;
}
public void setValue1(long value1) {
this.value1 = value1;
}
public int getStep1() {
return step1;
}
public void setStep1(int step1) {
this.step1 = step1;
}
public long getMax1() {
return max1;
}
public void setMax1(long max1) {
this.max1 = max1;
}
public long getValue0() {
return value0;
}
public void setValue0(long value0) {
this.value0 = value0;
}
public int getStep0() {
return step0;
}
public void setStep0(int step0) {
this.step0 = step0;
}
public long getMax0() {
return max0;
}
public void setMax0(long max0) {
this.max0 = max0;
}
public int getPos() {
return pos;
}
public void setPos(int pos) {
this.pos = pos;
}
public boolean isNextReady() {
return nextReady;
}
public void setNextReady(boolean nextReady) {
this.nextReady = nextReady;
}
public boolean isInitOk() {
return initOk;
}
public void setInitOk(boolean initOk) {
this.initOk = initOk;
}
}

View File

@@ -0,0 +1,67 @@
package com.hanserwei.hannote.distributed.id.generator.biz.service;
import com.alibaba.druid.pool.DruidDataSource;
import com.hanserwei.hannote.distributed.id.generator.biz.constant.Constants;
import com.hanserwei.hannote.distributed.id.generator.biz.core.IDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.PropertyFactory;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.ZeroIDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.SegmentIDGenImpl;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.IDAllocDao;
import com.hanserwei.hannote.distributed.id.generator.biz.core.segment.dao.impl.IDAllocDaoImpl;
import com.hanserwei.hannote.distributed.id.generator.biz.exception.InitException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import java.sql.SQLException;
import java.util.Properties;
@Service("SegmentService")
public class SegmentService {
private Logger logger = LoggerFactory.getLogger(SegmentService.class);
private IDGen idGen;
private DruidDataSource dataSource;
public SegmentService() throws SQLException, InitException {
Properties properties = PropertyFactory.getProperties();
boolean flag = Boolean.parseBoolean(properties.getProperty(Constants.LEAF_SEGMENT_ENABLE, "true"));
if (flag) {
// Config dataSource
dataSource = new DruidDataSource();
dataSource.setDriverClassName("com.mysql.cj.jdbc.Driver");
dataSource.setUrl(properties.getProperty(Constants.LEAF_JDBC_URL));
dataSource.setUsername(properties.getProperty(Constants.LEAF_JDBC_USERNAME));
dataSource.setPassword(properties.getProperty(Constants.LEAF_JDBC_PASSWORD));
dataSource.setValidationQuery("select 1");
dataSource.init();
// Config Dao
IDAllocDao dao = new IDAllocDaoImpl(dataSource);
// Config ID Gen
idGen = new SegmentIDGenImpl();
((SegmentIDGenImpl) idGen).setDao(dao);
if (idGen.init()) {
logger.info("Segment Service Init Successfully");
} else {
throw new InitException("Segment Service Init Fail");
}
} else {
idGen = new ZeroIDGen();
logger.info("Zero ID Gen Service Init Successfully");
}
}
public Result getId(String key) {
return idGen.get(key);
}
public SegmentIDGenImpl getIdGen() {
if (idGen instanceof SegmentIDGenImpl) {
return (SegmentIDGenImpl) idGen;
}
return null;
}
}

View File

@@ -0,0 +1,44 @@
package com.hanserwei.hannote.distributed.id.generator.biz.service;
import com.hanserwei.hannote.distributed.id.generator.biz.constant.Constants;
import com.hanserwei.hannote.distributed.id.generator.biz.core.IDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.PropertyFactory;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.Result;
import com.hanserwei.hannote.distributed.id.generator.biz.core.common.ZeroIDGen;
import com.hanserwei.hannote.distributed.id.generator.biz.core.snowflake.SnowflakeIDGenImpl;
import com.hanserwei.hannote.distributed.id.generator.biz.exception.InitException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import java.util.Properties;
@Service("SnowflakeService")
public class SnowflakeService {
private Logger logger = LoggerFactory.getLogger(SnowflakeService.class);
private IDGen idGen;
public SnowflakeService() throws InitException {
Properties properties = PropertyFactory.getProperties();
boolean flag = Boolean.parseBoolean(properties.getProperty(Constants.LEAF_SNOWFLAKE_ENABLE, "true"));
if (flag) {
String zkAddress = properties.getProperty(Constants.LEAF_SNOWFLAKE_ZK_ADDRESS);
int port = Integer.parseInt(properties.getProperty(Constants.LEAF_SNOWFLAKE_PORT));
idGen = new SnowflakeIDGenImpl(zkAddress, port);
if(idGen.init()) {
logger.info("Snowflake Service Init Successfully");
} else {
throw new InitException("Snowflake Service Init Fail");
}
} else {
idGen = new ZeroIDGen();
logger.info("Zero ID Gen Service Init Successfully");
}
}
public Result getId(String key) {
return idGen.get(key);
}
}

View File

@@ -0,0 +1,5 @@
spring:
cassandra:
keyspace-name: hannote
contact-points: 127.0.0.1
port: 9042

View File

@@ -0,0 +1,6 @@
server:
port: 8085 # application startup port
spring:
profiles:
active: dev # activate the dev (local development) profile by default

View File

@@ -0,0 +1,12 @@
spring:
application:
name: han-note-distributed-id-generator # application name
profiles:
active: dev # activate the dev (local development) profile by default
cloud:
nacos:
discovery:
enabled: true # enable service discovery
group: DEFAULT_GROUP # group the service belongs to
namespace: han-note # namespace
server-addr: 127.0.0.1:8848 # Nacos server address

View File

@@ -0,0 +1,12 @@
leaf.name=com.sankuai.leaf.opensource.test
leaf.segment.enable=true
leaf.jdbc.url=jdbc:mysql://127.0.0.1:3306/leaf?useUnicode=true&characterEncoding=utf-8&autoReconnect=true&useSSL=false&serverTimezone=Asia/Shanghai
leaf.jdbc.username=root
leaf.jdbc.password=mysql
# whether to enable snowflake mode
leaf.snowflake.enable=true
# ZooKeeper address used in snowflake mode
leaf.snowflake.zk.address=127.0.0.1:2181
# port this snowflake node registers with in ZooKeeper
leaf.snowflake.port=2222

View File

@@ -0,0 +1,58 @@
<configuration>
<!-- Include Spring Boot's base logback configuration -->
<include resource="org/springframework/boot/logging/logback/defaults.xml"/>
<!-- Application name -->
<property scope="context" name="appName" value="distributed-id-generator"/>
<!-- Custom log output path and file name prefix -->
<property name="LOG_FILE" value="./logs/${appName}.%d{yyyy-MM-dd}"/>
<!-- Per-line log output format -->
<property name="LOG_PATTERN" value="%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n"/>
<!-- File output -->
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- Log file naming pattern -->
<fileNamePattern>${LOG_FILE}-%i.log</fileNamePattern>
<!-- Keep 30 days of log files -->
<maxHistory>30</maxHistory>
<!-- Maximum size of a single log file -->
<maxFileSize>10MB</maxFileSize>
<!-- Total size cap for all log files; 0 means unlimited -->
<totalSizeCap>0</totalSizeCap>
<!-- Whether to clear history logs on restart; clearing is not recommended -->
<cleanHistoryOnStart>false</cleanHistoryOnStart>
<cleanHistoryOnStart>false</cleanHistoryOnStart>
</rollingPolicy>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<pattern>${LOG_PATTERN}</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- Write logs asynchronously for better performance -->
<appender name="ASYNC_FILE" class="ch.qos.logback.classic.AsyncAppender">
<!-- Discarding threshold; 0 means never discard. By default, once the queue is 80% full, TRACE/DEBUG/INFO entries are discarded -->
<discardingThreshold>0</discardingThreshold>
<!-- Queue size; defaults to 256 -->
<queueSize>256</queueSize>
<queueSize>256</queueSize>
<appender-ref ref="FILE"/>
</appender>
<!-- Local dev environment -->
<springProfile name="dev">
<include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
<root level="INFO">
<appender-ref ref="CONSOLE"/> <!-- console output -->
<appender-ref ref="ASYNC_FILE"/> <!-- also write to file; comment this line out locally if file logging is unwanted -->
</root>
</springProfile>
<!-- Other environments -->
<springProfile name="prod">
<include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
<root level="INFO">
<appender-ref ref="ASYNC_FILE"/> <!-- in production, write logs to file only -->
</root>
</springProfile>
</configuration>