@@ -99,77 +99,75 @@ import com.fr.third.alibaba.druid.wall.WallProviderStatValue;
 */
public class DruidDataSource extends DruidAbstractDataSource implements DruidDataSourceMBean, ManagedDataSource, Referenceable, Closeable, Cloneable, ConnectionPoolDataSource, MBeanRegistration {
    private final static Log LOG = LogFactory.getLog(DruidDataSource.class);
    private static final long serialVersionUID = 1L;

    // stats
    private volatile long recycleErrorCount = 0L;
    private long connectCount = 0L;
    private long closeCount = 0L;
    private volatile long connectErrorCount = 0L;
    private long recycleCount = 0L;
    private long removeAbandonedCount = 0L;
    private long notEmptyWaitCount = 0L;
    private long notEmptySignalCount = 0L;
    private long notEmptyWaitNanos = 0L;
    private int keepAliveCheckCount = 0;
    private int activePeak = 0;
    private long activePeakTime = 0;
    private int poolingPeak = 0;
    private long poolingPeakTime = 0;

    // store
    private volatile DruidConnectionHolder[] connections;
    private int poolingCount = 0;
    private int activeCount = 0;
    private volatile long discardCount = 0;
    private int notEmptyWaitThreadCount = 0;
    private int notEmptyWaitThreadPeak = 0;
    //
    private DruidConnectionHolder[] evictConnections;
    private DruidConnectionHolder[] keepAliveConnections;

    // threads
    private volatile ScheduledFuture<?> destroySchedulerFuture;
    private DestroyTask destroyTask;

    private volatile Future<?> createSchedulerFuture;

    private CreateConnectionThread createConnectionThread;
-   private PeriodDetectionThread periodDetectionThread;
    private DestroyConnectionThread destroyConnectionThread;
    private LogStatsThread logStatsThread;
    private int createTaskCount;

    private volatile long createTaskIdSeed = 1L;
    private long[] createTasks;

-   private CountDownLatch initedLatch = new CountDownLatch(2);
+   private final CountDownLatch initedLatch = new CountDownLatch(2);

    private volatile boolean enable = true;

    private boolean resetStatEnable = true;
    private volatile long resetCount = 0L;

    private String initStackTrace;

    private volatile boolean closing = false;
    private volatile boolean closed = false;
    private long closeTimeMillis = -1L;

    protected JdbcDataSourceStat dataSourceStat;

    private boolean useGlobalDataSourceStat = false;
    private boolean mbeanRegistered = false;
    public static ThreadLocal<Long> waitNanosLocal = new ThreadLocal<Long>();
    private boolean logDifferentThread = true;
    private volatile boolean keepAlive = false;
-   private SQLException initException = null;
    private boolean asyncInit = false;
    protected boolean killWhenSocketReadTimeout = false;
    protected boolean checkExecuteTime = false;

    private static List<Filter> autoFilters = null;
    private boolean loadSpifilterSkip = false;
    private volatile DataSourceDisableException disableException = null;

    protected static final AtomicLongFieldUpdater<DruidDataSource> recycleErrorCountUpdater
            = AtomicLongFieldUpdater.newUpdater(DruidDataSource.class, "recycleErrorCount");
@@ -180,11 +178,11 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    protected static final AtomicLongFieldUpdater<DruidDataSource> createTaskIdSeedUpdater
            = AtomicLongFieldUpdater.newUpdater(DruidDataSource.class, "createTaskIdSeed");

-   public DruidDataSource() {
+   public DruidDataSource(){
        this(false);
    }

-   public DruidDataSource(boolean fairLock) {
+   public DruidDataSource(boolean fairLock){
        super(fairLock);

        configFromPropety(System.getProperties());
@@ -528,45 +526,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }
    }

-   private synchronized void doSomethingBeforeCreationThreadBreak() {
-       String threadName = "Druid-ConnectionPool-Create-" + System.identityHashCode(this) + this.getUrl();
-       createConnectionThread = new CreateConnectionThread(threadName);
-       createConnectionThread.setStarted(false);
-       String destroyName = "Druid-ConnectionPool-Destroy-" + System.identityHashCode(this) + this.getUrl();
-       if (destroyConnectionThread != null) {
-           if (!destroyConnectionThread.isInterrupted()) {
-               destroyConnectionThread.interrupt();
-           }
-       }
-       destroyConnectionThread = new DestroyConnectionThread(destroyName);
-       destroyConnectionThread.setStarted(false);
-       initedLatch = new CountDownLatch(2);
-   }
-
-   private void checkThread() throws SQLException {
-       if (createConnectionThread == null) {
-           throw new IllegalStateException("createConnectionThread not start!");
-       }
-       if (destroyConnectionThread == null) {
-           throw new IllegalStateException("destroyConnectionThread not start!");
-       }
-       if (!createConnectionThread.isStarted() && !destroyConnectionThread.isStarted()) {
-           synchronized (this) { // thread safety: double-checked locking
-               if (!createConnectionThread.isStarted() && !destroyConnectionThread.isStarted()) {
-                   createConnectionThread.setStarted(true);
-                   createConnectionThread.start();
-                   destroyConnectionThread.setStarted(true);
-                   destroyConnectionThread.start();
-                   try {
-                       initedLatch.await();
-                   } catch (InterruptedException e) {
-                       throw new SQLException(e.getMessage(), e);
-                   }
-               }
-           }
-       }
-   }
-
    public boolean isKillWhenSocketReadTimeout() {
        return killWhenSocketReadTimeout;
    }
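Reviewer note (not part of the patch): the block removed above lazily started the pool's worker threads at most once, guarding Thread.start() with a started flag and double-checked locking. A minimal, self-contained sketch of that pattern, with illustrative names only:

    class LazyWorker {
        private final Thread worker = new Thread(() -> { /* pool maintenance loop */ });
        private volatile boolean started = false;

        void ensureStarted() {
            if (!started) {                 // cheap unsynchronized fast path
                synchronized (this) {       // take the lock only when a start looks needed
                    if (!started) {         // re-check under the lock (double-checked locking)
                        worker.setDaemon(true);
                        worker.start();     // Thread.start() may only ever be called once
                        started = true;
                    }
                }
            }
        }
    }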
|
|
|
@@ -839,11 +798,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    }

    public void init() throws SQLException {
-       if (initException != null) {
-           LOG.error("{dataSource-" + this.getID() + "} init error", initException);
-           throw initException;
-       }
-
        if (inited) {
            return;
        }
@@ -889,10 +843,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        DbType dbType = DbType.of(this.dbTypeName);
-       if (dbType == DbType.mysql
-               || dbType == DbType.mariadb
-               || dbType == DbType.oceanbase
-               || dbType == DbType.ads) {
+       if (JdbcUtils.isMysqlDbType(dbType)) {
            boolean cacheServerConfigurationSet = false;
            if (this.connectProperties.containsKey("cacheServerConfiguration")) {
                cacheServerConfigurationSet = true;
@@ -993,7 +944,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            createAndLogThread();
            createAndStartCreatorThread();
            createAndStartDestroyThread();
-           createAndStartDetectThread();

            initedLatch.await();
            init = true;
@@ -1018,13 +968,16 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        } catch (SQLException e) {
            LOG.error("{dataSource-" + this.getID() + "} init error", e);
-           initException = e;
            throw e;
        } catch (InterruptedException e) {
            throw new SQLException(e.getMessage(), e);
-       } catch (Throwable e) {
-           initException = new SQLException(e.getMessage());
+       } catch (RuntimeException e){
+           LOG.error("{dataSource-" + this.getID() + "} init error", e);
+           throw e;
+       } catch (Error e){
+           LOG.error("{dataSource-" + this.getID() + "} init error", e);
+           throw e;
        } finally {
            inited = true;
            lock.unlock();
@@ -1122,7 +1075,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                period = 1000;
            }
            destroySchedulerFuture = destroyScheduler.scheduleAtFixedRate(destroyTask, period, period,
                    TimeUnit.MILLISECONDS);
            initedLatch.countDown();
            return;
        }
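Reviewer note: initedLatch is a CountDownLatch(2); init() blocks in await() until both maintenance sides have called countDown(), which is the standard latch hand-off shown in this minimal sketch (illustrative code, not pool internals):

    import java.util.concurrent.CountDownLatch;

    public class LatchDemo {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch initedLatch = new CountDownLatch(2);   // one permit per worker thread

            Runnable worker = initedLatch::countDown;             // each worker signals once it is up
            new Thread(worker, "creator").start();
            new Thread(worker, "destroyer").start();

            initedLatch.await();                                  // returns only after both signals
            System.out.println("both maintenance threads are up");
        }
    }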
|
|
|
@@ -1134,7 +1087,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    protected void createAndStartCreatorThread() {
        if (createScheduler == null) {
-           String threadName = "Druid-ConnectionPool-Create-" + System.identityHashCode(this) + this.getUrl();
+           String threadName = "Druid-ConnectionPool-Create-" + System.identityHashCode(this);
            createConnectionThread = new CreateConnectionThread(threadName);
            createConnectionThread.start();
            return;
@@ -1143,18 +1096,9 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        initedLatch.countDown();
    }

-   private void createAndStartDetectThread() {
-       if (createScheduler == null) {
-           String threadName = "Druid-ConnectionPool-Detection-" + System.identityHashCode(this) + this.getUrl();
-           periodDetectionThread = new PeriodDetectionThread(threadName);
-           periodDetectionThread.start();
-       }
-   }
-
    /**
     * load filters from SPI ServiceLoader
     *
     * @see ServiceLoader
     */
    private void initFromSPIServiceLoader() {
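Reviewer note: initFromSPIServiceLoader() relies on java.util.ServiceLoader to discover Filter implementations on the classpath. Roughly, a provider jar lists its class in META-INF/services and the loader iterates the instances; a hedged sketch (FooFilter is a made-up provider, and the auto-load annotation check done by the real method is omitted):

    // Provider side: a file named META-INF/services/com.fr.third.alibaba.druid.filter.Filter
    // containing the single line:  com.example.FooFilter

    // Consumer side, the gist of what the method does:
    ServiceLoader<Filter> loader = ServiceLoader.load(Filter.class);
    for (Filter filter : loader) {
        autoFilters.add(filter);    // collected for later registration on the data source
    }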
|
|
|
@@ -1205,7 +1149,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    /**
     * De-duplicates: a filter already present is not added again.
     *
     * @param filter
     */
    private void addFilter(Filter filter) {
@@ -1237,21 +1181,21 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            return;
        }

-       String infoMessage = "";
+       String errorMessage = "";

-       if (isTestOnBorrow()) {
-           infoMessage += "testOnBorrow is true, ";
+       if (testOnBorrow) {
+           errorMessage += "testOnBorrow is true, ";
        }

-       if (isTestOnReturn()) {
-           infoMessage += "testOnReturn is true, ";
+       if (testOnReturn) {
+           errorMessage += "testOnReturn is true, ";
        }

-       if (isTestWhileIdle()) {
-           infoMessage += "testWhileIdle is true, ";
+       if (testWhileIdle) {
+           errorMessage += "testWhileIdle is true, ";
        }

-       LOG.info(infoMessage + "validationQuery not set");
+       LOG.error(errorMessage + "validationQuery not set");
    }
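Reviewer note: the warning above fires when any of testOnBorrow / testOnReturn / testWhileIdle is enabled but no validationQuery is configured, because the checks then have nothing to execute. A typical configuration that keeps validation cheap and avoids the warning (illustrative values):

    DruidDataSource ds = new DruidDataSource();
    ds.setUrl(jdbcUrl);
    ds.setValidationQuery("SELECT 1");   // e.g. "SELECT 1 FROM DUAL" on Oracle
    ds.setTestWhileIdle(true);           // validate only connections that have sat idle
    ds.setTestOnBorrow(false);           // skip the extra round trip on every borrow
    ds.setTestOnReturn(false);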
|
|
|
|
|
|
|
|
|
    protected void resolveDriver() throws SQLException {
@@ -1289,7 +1233,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                if (driver.getMajorVersion() < 10) {
                    throw new SQLException("not support oracle driver " + driver.getMajorVersion() + "."
                            + driver.getMinorVersion());
                }

                if (driver.getMajorVersion() == 10 && isUseOracleImplicitCache()) {
@@ -1333,7 +1277,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            if (query instanceof SQLSelectQueryBlock) {
                if (((SQLSelectQueryBlock) query).getFrom() == null) {
                    LOG.error("invalid oracle validationQuery. " + validationQuery + ", may should be : " + validationQuery
                            + " FROM DUAL");
                }
            }
        }
@@ -1362,7 +1306,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            if (query instanceof SQLSelectQueryBlock) {
                if (((SQLSelectQueryBlock) query).getFrom() == null) {
                    LOG.error("invalid db2 validationQuery. " + validationQuery + ", may should be : " + validationQuery
                            + " FROM SYSDUMMY");
                }
            }
        }
@@ -1374,22 +1318,27 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        String realDriverClassName = driver.getClass().getName();
        if (JdbcUtils.isMySqlDriver(realDriverClassName)) {
-           this.validConnectionChecker = new MySqlValidConnectionChecker();
+           this.validConnectionChecker = new MySqlValidConnectionChecker(usePingMethod);

        } else if (realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER)
                || realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER2)) {
            this.validConnectionChecker = new OracleValidConnectionChecker();

        } else if (realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER)
                || realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER_SQLJDBC4)
                || realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER_JTDS)) {
            this.validConnectionChecker = new MSSQLValidConnectionChecker();

        } else if (realDriverClassName.equals(JdbcConstants.POSTGRESQL_DRIVER)
                || realDriverClassName.equals(JdbcConstants.ENTERPRISEDB_DRIVER)
                || realDriverClassName.equals(JdbcConstants.POLARDB_DRIVER)) {
            this.validConnectionChecker = new PGValidConnectionChecker();
        } else if (realDriverClassName.equals(JdbcConstants.OCEANBASE_DRIVER)
                || (realDriverClassName.equals(JdbcConstants.OCEANBASE_DRIVER2))) {
            DbType dbType = DbType.of(this.dbTypeName);
            this.validConnectionChecker = new OceanBaseValidConnectionChecker(dbType);
        }

    }
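Reviewer note: the MySQL branch now passes usePingMethod into the checker; with ping enabled, the driver's ping facility is used instead of executing the validation SQL. As far as I recall it can also be toggled globally through the druid.mysql.usePingMethod system property (treat the property name as an assumption), for example:

    // Illustrative only: prefer plain SQL validation over the ping path for MySQL.
    System.setProperty("druid.mysql.usePingMethod", "false");   // assumed switch, set before the pool is created
    DruidDataSource ds = new DruidDataSource();
    ds.setValidationQuery("SELECT 1");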
|
|
|
|
|
|
|
|
|
    private void initExceptionSorter() {
@@ -1402,7 +1351,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

-       for (Class<?> driverClass = driver.getClass(); ; ) {
+       for (Class<?> driverClass = driver.getClass();;) {
            String realDriverClassName = driverClass.getName();
            if (realDriverClassName.equals(JdbcConstants.MYSQL_DRIVER) //
                    || realDriverClassName.equals(JdbcConstants.MYSQL_DRIVER_6)) {
@@ -1453,7 +1402,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    public DruidPooledConnection getConnection(long maxWaitMillis) throws SQLException {
        init();

-       checkThread();
        if (filters.size() > 0) {
            FilterChainImpl filterChain = new FilterChainImpl(this);
            return filterChain.dataSource_connect(this, maxWaitMillis);
@@ -1474,7 +1422,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    public DruidPooledConnection getConnectionDirect(long maxWaitMillis) throws SQLException {
        int notFullTimeoutRetryCnt = 0;
-       for (; ; ) {
+       for (;;) {
            // handle notFullTimeoutRetry
            DruidPooledConnection poolableConnection;
            try {
@@ -1508,10 +1456,10 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            if (testWhileIdle) {
                final DruidConnectionHolder holder = poolableConnection.holder;
                long currentTimeMillis = System.currentTimeMillis();
                long lastActiveTimeMillis = holder.lastActiveTimeMillis;
                long lastExecTimeMillis = holder.lastExecTimeMillis;
                long lastKeepTimeMillis = holder.lastKeepTimeMillis;

                if (checkExecuteTime
                        && lastExecTimeMillis != lastActiveTimeMillis) {
@@ -1522,7 +1470,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                    lastActiveTimeMillis = lastKeepTimeMillis;
                }

                long idleMillis = currentTimeMillis - lastActiveTimeMillis;

                long timeBetweenEvictionRunsMillis = this.timeBetweenEvictionRunsMillis;
@@ -1532,7 +1480,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                if (idleMillis >= timeBetweenEvictionRunsMillis
                        || idleMillis < 0 // unexcepted branch
                ) {
                    boolean validate = testConnectionInternal(poolableConnection.holder, poolableConnection.conn);
                    if (!validate) {
                        if (LOG.isDebugEnabled()) {
@@ -1540,7 +1488,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                        }

                        discardConnection(poolableConnection.holder);
                        continue;
                    }
                }
            }
@@ -1570,7 +1518,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    /**
     * Discard the connection instead of returning it to the pool.
     *
     * @param realConnection
     * @deprecated
     */
@@ -1643,7 +1591,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        DruidConnectionHolder holder;

-       for (boolean createDirect = false; ; ) {
+       for (boolean createDirect = false;;) {
            if (createDirect) {
                createStartNanosUpdater.set(this, System.nanoTime());
                if (creatingCountUpdater.compareAndSet(this, 0, 1)) {
@@ -1792,10 +1740,10 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            StringBuilder buf = new StringBuilder(128);
            buf.append("wait millis ")//
                    .append(waitNanos / (1000 * 1000))//
                    .append(", active ").append(activeCount)//
                    .append(", maxActive ").append(maxActive)//
                    .append(", creating ").append(creatingCount)//
            ;
            if (creatingCount > 0 && createStartNanos > 0) {
                long createElapseMillis = (System.nanoTime() - createStartNanos) / (1000 * 1000);
@@ -1808,21 +1756,17 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                buf.append(", createErrorCount ").append(createErrorCount);
            }

-           JdbcDataSourceStat sourceStat = this.getDataSourceStat();
-           if (sourceStat != null) {
-               List<JdbcSqlStatValue> sqlList = sourceStat.getRuningSqlList();
+           List<JdbcSqlStatValue> sqlList = this.getDataSourceStat().getRuningSqlList();
            for (int i = 0; i < sqlList.size(); ++i) {
                if (i != 0) {
                    buf.append('\n');
                } else {
                    buf.append(", ");
                }
                JdbcSqlStatValue sql = sqlList.get(i);
-               buf.append("runningSqlCount ");
-               buf.append(sql.getRunningCount());
+               buf.append("runningSqlCount ").append(sql.getRunningCount());
                buf.append(" : ");
                buf.append(sql.getSql());
            }

            String errorMessage = buf.toString();
@@ -1914,12 +1858,13 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            lock.unlock();
        }

-       if (onFatalError && holder != null && holder.getDataSource() != null) {
+       if(onFatalError && holder != null && holder.getDataSource() != null) {
            ReentrantLock dataSourceLock = holder.getDataSource().lock;
            dataSourceLock.lock();
            try {
                emptySignal();
-           } finally {
+           }
+           finally {
                dataSourceLock.unlock();
            }
        }
@@ -1956,8 +1901,8 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        if (logDifferentThread //
                && (!isAsyncCloseConnectionEnable()) //
                && pooledConnection.ownerThread != Thread.currentThread()//
        ) {
            LOG.warn("get/close not same thread");
        }
@@ -2152,10 +2097,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            destroyConnectionThread.interrupt();
        }

-       if (periodDetectionThread != null) {
-           periodDetectionThread.interrupt();
-       }
-
        if (createSchedulerFuture != null) {
            createSchedulerFuture.cancel(true);
        }
@@ -2213,7 +2154,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            @Override
            public Object run() {
                ObjectName objectName = DruidDataSourceStatManager.addDataSource(DruidDataSource.this,
                        DruidDataSource.this.name);

                DruidDataSource.this.setObjectName(objectName);
                DruidDataSource.this.mbeanRegistered = true;
@@ -2307,7 +2248,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    private DruidConnectionHolder pollLast(long nanos) throws InterruptedException, SQLException {
        long estimate = nanos;

-       for (; ; ) {
+       for (;;) {
            if (poolingCount == 0) {
                emptySignal(); // send signal to CreateThread create connection
@@ -2328,8 +2269,8 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                try {
                    long startEstimate = estimate;
                    estimate = notEmpty.awaitNanos(estimate); // signal by
                                                              // recycle or
                                                              // creator
                    notEmptyWaitCount++;
                    notEmptyWaitNanos += (startEstimate - estimate);
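Reviewer note: pollLast keeps the remaining wait budget in estimate and feeds it back into Condition.awaitNanos, which returns an estimate of the nanoseconds still available. That is the standard timed-wait loop, sketched here outside the pool (queueIsEmpty/takeOne/unit/timeout are placeholders):

    long nanos = unit.toNanos(timeout);
    lock.lock();
    try {
        while (queueIsEmpty()) {                 // re-check the condition; guards against spurious wakeups
            if (nanos <= 0L) {
                return null;                     // budget exhausted: give up with a timeout
            }
            nanos = notEmpty.awaitNanos(nanos);  // wait and get back the remaining budget
        }
        return takeOne();
    } finally {
        lock.unlock();
    }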
|
|
|
|
|
|
|
|
@@ -2553,7 +2494,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        value.setCommitCount(commitCountUpdater.getAndSet(this, 0));
        value.setRollbackCount(rollbackCountUpdater.getAndSet(this, 0));

-       value.setPstmtCacheHitCount(cachedPreparedStatementHitCountUpdater.getAndSet(this, 0));
+       value.setPstmtCacheHitCount(cachedPreparedStatementHitCountUpdater.getAndSet(this,0));
        value.setPstmtCacheMissCount(cachedPreparedStatementMissCountUpdater.getAndSet(this, 0));

        value.setStartTransactionCount(startTransactionCountUpdater.getAndSet(this, 0));
@@ -2631,7 +2572,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        clearCreateTask(createTaskId);

        if (poolingCount + createTaskCount < notEmptyWaitThreadCount //
                && activeCount + poolingCount + createTaskCount < maxActive) {
            emptySignal();
        }
    }
@@ -2643,7 +2584,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    public class CreateConnectionTask implements Runnable {

        private int errorCount = 0;
        private boolean initTask = false;
        private final long taskId;
@@ -2662,7 +2603,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        private void runInternal() {
-           for (; ; ) {
+           for (;;) {
                // addLast
                lock.lock();
@@ -2828,9 +2769,8 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    }

    public class CreateConnectionThread extends Thread {
-       private volatile boolean started = true;

-       public CreateConnectionThread(String name) {
+       public CreateConnectionThread(String name){
            super(name);
            this.setDaemon(true);
        }
@@ -2840,7 +2780,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            long lastDiscardCount = 0;
            int errorCount = 0;
-           for (; ; ) {
+           for (;;) {
                // addLast
                try {
                    lock.lockInterruptibly();
@@ -2889,7 +2829,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                    if ((!closing) && (!closed)) {
                        LOG.error("create connection Thread Interrupted, url: " + jdbcUrl, e);
                    }
-                   DruidDataSource.this.doSomethingBeforeCreationThreadBreak();
                    break;
                } finally {
                    lock.unlock();
@@ -2899,13 +2838,9 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                try {
                    connection = createPhysicalConnection();
-               } catch (SQLException | RuntimeException e) {
-                   if (e instanceof SQLException) {
-                       LOG.error("create connection error, url: " + jdbcUrl + ", errorCode " + ((SQLException) e).getErrorCode()
-                               + ", state " + ((SQLException) e).getSQLState(), e);
-                   } else {
-                       LOG.error("create connection error", e);
-                   }
+               } catch (SQLException e) {
+                   LOG.error("create connection SQLException, url: " + jdbcUrl + ", errorCode " + e.getErrorCode()
+                           + ", state " + e.getSQLState(), e);

                    errorCount++;
                    if (errorCount > connectionErrorRetryAttempts && timeBetweenConnectErrorMillis > 0) {
@@ -2926,16 +2861,17 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                        try {
                            Thread.sleep(timeBetweenConnectErrorMillis);
-                       } catch (InterruptedException ignore) {
+                       } catch (InterruptedException interruptEx) {
                            break;
                        }
-                       DruidDataSource.this.doSomethingBeforeCreationThreadBreak();
-                       break;
                    }
                } catch (RuntimeException e) {
                    LOG.error("create connection RuntimeException", e);
                    setFailContinuous(true);
                    continue;
                } catch (Error e) {
                    LOG.error("create connection Error", e);
                    setFailContinuous(true);
-                   DruidDataSource.this.doSomethingBeforeCreationThreadBreak();
                    break;
                }
|
|
|
|
|
@ -2956,49 +2892,11 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
|
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
public boolean isStarted() { |
|
|
|
|
return started; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
public void setStarted(boolean started) { |
|
|
|
|
this.started = started; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
//周期性检查生产线程状态,因为在终止生产线程的时候,为了不让生产线程疯狂重试数据库,只是生成了一个生产线程,但是并没有start,需要一个守护线程
|
|
|
|
|
//周期性检查线程状态,帮助其启动。
|
|
|
|
|
private class PeriodDetectionThread extends Thread { |
|
|
|
|
public PeriodDetectionThread(String name) { |
|
|
|
|
super(name); |
|
|
|
|
this.setDaemon(true); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
public void run() { |
|
|
|
|
while (true) { |
|
|
|
|
synchronized (DruidDataSource.this) { |
|
|
|
|
//生产线程发生了切换,并且有线程在等待连接,需要主动唤醒生产线程,否则由getConnection方法来唤醒生产线程
|
|
|
|
|
if (!createConnectionThread.started && !destroyConnectionThread.started && notEmptyWaitThreadCount > 0) { |
|
|
|
|
createConnectionThread.setStarted(true); |
|
|
|
|
createConnectionThread.start(); |
|
|
|
|
destroyConnectionThread.setStarted(true); |
|
|
|
|
destroyConnectionThread.start(); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
try { |
|
|
|
|
Thread.sleep(30000); |
|
|
|
|
} catch (InterruptedException ignore) { |
|
|
|
|
break; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
public class DestroyConnectionThread extends Thread { |
|
|
|
|
|
|
|
|
|
private volatile boolean started = true; |
|
|
|
|
|
|
|
|
|
public DestroyConnectionThread(String name) { |
|
|
|
|
public DestroyConnectionThread(String name){ |
|
|
|
|
super(name); |
|
|
|
|
this.setDaemon(true); |
|
|
|
|
} |
|
|
|
@@ -3006,7 +2904,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        public void run() {
            initedLatch.countDown();

-           for (; ; ) {
+           for (;;) {
                // evict starting from the head of the pool
                try {
                    if (closed || closing) {
@@ -3030,13 +2928,6 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            }
        }

-       public boolean isStarted() {
-           return started;
-       }
-
-       public void setStarted(boolean started) {
-           this.started = started;
-       }
    }

    public class DestroyTask implements Runnable {
@@ -3057,14 +2948,14 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
    public class LogStatsThread extends Thread {
-       public LogStatsThread(String name) {
+       public LogStatsThread(String name){
            super(name);
            this.setDaemon(true);
        }

        public void run() {
            try {
-               for (; ; ) {
+               for (;;) {
                    try {
                        logStats();
                    } catch (Exception e) {
@@ -3090,7 +2981,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        try {
            Iterator<DruidPooledConnection> iter = activeConnections.keySet().iterator();

-           for (; iter.hasNext(); ) {
+           for (; iter.hasNext();) {
                DruidPooledConnection pooledConnection = iter.next();

                if (pooledConnection.isRunning()) {
@@ -3142,7 +3033,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
                }

                buf.append("ownerThread current state is " + pooledConnection.getOwnerThread().getState()
                        + ", current stackTrace\n");
                trace = pooledConnection.getOwnerThread().getStackTrace();
                for (int i = 0; i < trace.length; i++) {
                    buf.append("\tat ");
@@ -3158,9 +3049,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        return removeCount;
    }

-   /**
-    * Instance key
-    */
+   /** Instance key */
    protected String instanceKey = null;

    public Reference getReference() throws NamingException {
@@ -3230,7 +3119,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        int keepAliveCount = 0;
        int fatalErrorIncrement = fatalErrorCount - fatalErrorCountLastShrink;
        fatalErrorCountLastShrink = fatalErrorCount;

        try {
            if (!inited) {
                return;
@@ -3241,7 +3130,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
            for (int i = 0; i < poolingCount; ++i) {
                DruidConnectionHolder connection = connections[i];

                if ((onFatalError || fatalErrorIncrement > 0) && (lastFatalErrorTimeMillis > connection.connectTimeMillis)) {
                    keepAliveConnections[keepAliveCount++] = connection;
                    continue;
                }
@@ -3865,7 +3754,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        if (this.statLogger != null
                && (this.statLogger.getClass() == iface || DruidDataSourceStatLogger.class == iface)) {
            return true;
        }
@@ -3881,7 +3770,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        if (this.statLogger != null
                && (this.statLogger.getClass() == iface || DruidDataSourceStatLogger.class == iface)) {
            return (T) statLogger;
        }
@@ -3925,7 +3814,7 @@ public class DruidDataSource extends DruidAbstractDataSource implements DruidDat
        }

        int fillCount = 0;
-       for (; ; ) {
+       for (;;) {
            try {
                lock.lockInterruptibly();
            } catch (InterruptedException e) {