Spring bootでデータ・ソースを動的に追加しsharding-jdbcでライブラリ・テーブル・クエリーを作成
最近のプロジェクトでは、データウェアハウス(数倉)のデータと連携する必要があり、クエリーを手動で組み立て、分散データベース・テーブル(分庫分表)をサポートしなければなりません。そのため、メモリキャッシュにGoogle Guavaを採用し、Redisで永続化キャッシュを行い、sharding-jdbcでテーブル横断クエリーを実現します。主要なクラスは以下のとおりです。
1.データベース情報キャッシュ(Redis)
2.動的データソースキャッシュ(Guava)
3.sharding-jdbc構成
1.データベース情報キャッシュ
package com.tudou.potato.datagaea.apps.cache;
import com.alibaba.fastjson.JSONObject;
import com.tudou.potato.datagaea.core.wh.http.resp.RdsLinkDetailResp;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
/**
* @author: ge.li
* @create: 2020-05-22 09:42
**/
@Component
public class DataSourceCache {

/** Redis key prefix under which RDS link details are persisted as JSON. */
private static final String DATAGAEA_APPS_RDS_LINK = "datagaea_apps_rdslink:";

// Raw type kept deliberately: the concrete generic parameters of the injected
// RedisTemplate bean are configured elsewhere; values are stored as JSON strings.
@Autowired
private RedisTemplate redisTemplate;

/**
 * Looks up the RDS link details cached in Redis for the given code.
 *
 * @param code data-source code appended to the key prefix
 * @return the cached details, or {@code null} on a cache miss or an unparseable entry
 */
public RdsLinkDetailResp getRDSLinkInfo(String code) {
String redisKey = DATAGAEA_APPS_RDS_LINK + code;
Object cached = redisTemplate.opsForValue().get(redisKey);
if (cached == null) {
// Cache miss. Previously this surfaced as an NPE from .toString() that a
// blanket catch swallowed — make the miss explicit instead.
return null;
}
try {
return JSONObject.parseObject(cached.toString(), RdsLinkDetailResp.class);
} catch (Exception ex) {
// Corrupt or incompatible JSON in Redis: treat it as a miss rather than propagate.
return null;
}
}

/**
 * Persists the given RDS link details to Redis as JSON, keyed by {@code data.getCode()}.
 *
 * @param data the link details to cache; must have a non-null code
 */
public void setRDSLinkInfo(RdsLinkDetailResp data) {
String redisKey = DATAGAEA_APPS_RDS_LINK + data.getCode();
redisTemplate.opsForValue().set(redisKey, JSONObject.toJSONString(data));
}
}
2.動的データソースキャッシュ
package com.tudou.potato.datagaea.apps.cache;
import com.alibaba.druid.pool.DruidDataSource;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
/**
* @author: ge.li
* @create: 2020-05-19 09:56
**/
public class DynamicDataSource extends AbstractRoutingDataSource {
public final static Cache, DruidDataSource> cache = CacheBuilder.newBuilder()
// cache 10,
.initialCapacity(10)
// 5, 5 cache
.concurrencyLevel(5)
// cache 10 * 60
.expireAfterWrite(10 * 60, TimeUnit.SECONDS)
// cache
.build();
@Override
protected String determineCurrentLookupKey() {
return null;
}
public static void setDataSource(String dataSourceName, DruidDataSource dataSource) {
DynamicDataSource.cache.put(dataSourceName, dataSource);
}
public static DruidDataSource getDataSource(String dataSourceName) {
return DynamicDataSource.cache.getIfPresent(dataSourceName);
}
public static void clear() {
DynamicDataSource.cache.cleanUp();
}
public static DruidDataSource createDataSource(String url, String userName, String Password) {
DruidDataSource druidDataSource = new DruidDataSource();
druidDataSource.setUrl(url);
druidDataSource.setUsername(userName);
druidDataSource.setPassword(Password);
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(30);
return druidDataSource;
}
}
3.sharding-jdbc構成
package com.tudou.potato.datagaea.apps.cache;
import com.alibaba.druid.pool.DruidDataSource;
import com.tudou.potato.api.bean.result.APIResponse;
import com.tudou.potato.api.message.BizCommMessage;
import com.tudou.potato.common.pojo.principal.PrincipalInfoContext;
import com.tudou.potato.datagaea.apps.tls.base.OnlineQueryResult;
import com.tudou.potato.datagaea.apps.tls.repo.impl.RDBConnectionService;
import com.tudou.potato.datagaea.core.wh.http.resp.RdsLinkDetailResp;
import com.tudou.potato.datagaea.core.wh.remote.RdsLinkRemote;
import io.shardingsphere.api.config.rule.ShardingRuleConfiguration;
import io.shardingsphere.api.config.rule.TableRuleConfiguration;
import io.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.annotation.Resource;
import javax.sql.DataSource;
import org.springframework.stereotype.Component;
/**
* @author: ge.li
* @create: 2020-05-20 10:33
**/
@Component
public class SharedDataSource {
@Resource
private RdsLinkRemote rdsLinkRemote;
@Resource
private RDBConnectionService rDBConnectionService;
@Resource
private DataSourceCache dataSourceCache;
public RdsLinkDetailResp createDataSource(String code) {
RdsLinkDetailResp rdsinfo = null;
DruidDataSource data = DynamicDataSource.getDataSource(code);
try
{
//
rdsinfo = dataSourceCache.getRDSLinkInfo(code);
Connection conn = data.getConnection();
conn.close();
}catch(Exception ex)
{
if (null == rdsinfo) {
APIResponse> rdsinfoResp = rdsLinkRemote
.detailByCode(code, PrincipalInfoContext.getAuthorization(),
PrincipalInfoContext.getReqId());
if (BizCommMessage.OK != rdsinfoResp.getCode()) {
return null;
}
dataSourceCache.setRDSLinkInfo(rdsinfoResp.getData());
rdsinfo = dataSourceCache.getRDSLinkInfo(code);
}
DynamicDataSource.setDataSource(code, DynamicDataSource
.createDataSource(rdsinfo.getJdbcUrl(),
rdsinfo.getUsername(), rdsinfo.getPassword()));
}
return rdsinfo;
}
public OnlineQueryResult sharedDataSourceSelectSQL(List> rdsCodes, String tableName,
List> areaCodes, String sqlStr) throws Exception {
OnlineQueryResult resultData = null;
List> dbNames = new ArrayList();
Map, DataSource> dataSourceMap = new HashMap<>();
for (String code : rdsCodes) {
RdsLinkDetailResp rdsinfo = createDataSource(code);
DruidDataSource data = DynamicDataSource.getDataSource(code);
dataSourceMap.put(rdsinfo.getRdsDbName(), data);
dbNames.add(rdsinfo.getRdsDbName());
}
ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
String tables = "";
for (String dbName : dbNames) {
for (String areeCode : areaCodes) {
tables += String
.format("%s.%s_%s,", dbName, tableName, areeCode);
}
}
TableRuleConfiguration orderTableRuleConfig = new TableRuleConfiguration();
//
orderTableRuleConfig.setLogicTable(tableName);
orderTableRuleConfig.setActualDataNodes(tables);
shardingRuleConfig.getTableRuleConfigs().add(orderTableRuleConfig);
DataSource dataSource = ShardingDataSourceFactory
.createDataSource(dataSourceMap, shardingRuleConfig, new HashMap<>(),
new Properties());
Connection conn = dataSource.getConnection();
resultData = rDBConnectionService.executeSelectSQL(sqlStr, conn);
return resultData;
}
}