Overview
Concurrency Programming Optimization: the Dynamic Thread Pool
A thread pool is a tool that manages threads based on the pooling idea, and it appears in almost every multi-threaded server. Even after we carefully define a pool's parameters, production still runs into all kinds of "the parameters aren't enough" problems, and we have to tune them again. Going through a full modify-and-redeploy cycle every single time is far too costly, and during flash sales or promotions it is simply not realistic. This leads to a solution: the dynamic thread pool. The pool's core parameters are kept in a configuration center, and when they need to change we simply edit them there. Without further ado, let's get to the code.
Overall design
The core design of the dynamic thread pool covers the following three aspects:
Simplified pool configuration: ThreadPoolExecutor has 8 constructor parameters, but the 3 most important are corePoolSize, maximumPoolSize, and workQueue; together they largely determine how the pool assigns tasks and threads. In practice, there are mainly two scenarios in which we need concurrency:
(1) Running sub-tasks in parallel to improve response time. In this case a synchronous (hand-off) queue should be used: no task should be buffered, every task should run immediately.
(2) Running large batches of tasks in parallel to improve throughput. In this case a bounded queue should be used to buffer the batch, and its capacity must be declared so that tasks cannot pile up without limit. Exposing just these three key parameters, plus a choice between these two kinds of queue, covers the vast majority of business needs. Less is more.
Dynamically modifiable parameters: this addresses the problems that parameters are hard to configure well and expensive to change. Building on the extensibility the Java thread pool already provides, we wrap the pool so that it can listen for and synchronize with external messages and adjust its configuration accordingly. The configuration lives on the platform side, where developers can easily view and modify it (a minimal sketch of the JDK building blocks this relies on follows this list).
Thread pool monitoring: if you cannot observe something's state, you have nowhere to start improving it. We add monitoring across the task-execution lifecycle to help developers understand what the pool is doing.
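Both the queue choice and the dynamic modification above map directly onto building blocks the JDK already ships: the two kinds of work queue and the runtime setters on ThreadPoolExecutor. A minimal sketch, with arbitrary pool sizes and queue capacity:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DesignSketch {
    public static void main(String[] args) {
        // (1) Fast response: a synchronous hand-off queue, nothing is buffered.
        ThreadPoolExecutor fastPool =
                new ThreadPoolExecutor(4, 8, 60L, TimeUnit.SECONDS, new SynchronousQueue<>());
        // (2) High throughput: a bounded queue that buffers a large batch of tasks.
        ThreadPoolExecutor batchPool =
                new ThreadPoolExecutor(4, 8, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(1000));

        // The JDK already lets these parameters be changed on a running pool;
        // the dynamic thread pool below builds on exactly these setters.
        batchPool.setMaximumPoolSize(16);
        batchPool.setCorePoolSize(8);
        batchPool.setKeepAliveTime(30L, TimeUnit.SECONDS);

        fastPool.shutdown();
        batchPool.shutdown();
    }
}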
Implementation
Refactoring the thread pool
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
* [Dynamic thread pool]
*
* @author : [Administrator]
* @version : [v1.0]
* @createTime : [2023/2/12 22:42]
*/
public class DynamicThreadPoolExecutor extends ThreadPoolExecutor{
/**
* Thread pool name
*/
private String threadPoolName;
private String defaultTaskName = "defaultTask";
/**
* Default rejection policy
*/
private static final RejectedExecutionHandler defaultHandler = new ThreadPoolExecutor.AbortPolicy();
/**
* Execution records per worker thread (thread name -> metadata of the most recent task it ran)
*/
private Map<String, Map> transactionMap = new ConcurrentHashMap<>();
/**
* Metadata of the most recently submitted task (copied into transactionMap when execution starts)
*/
private Map<String, String> runnableNameMap = new ConcurrentHashMap<>();
public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue);
}
public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory);
}
public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, handler);
}
public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
}
public DynamicThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler, String threadPoolName) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
this.threadPoolName = threadPoolName;
}
@Override
public void execute(Runnable command) {
// Record metadata of the latest submitted task (class, task name, start time)
runnableNameMap.put("class", command.getClass().getSimpleName());
runnableNameMap.put("task", defaultTaskName);
runnableNameMap.put("start_time", String.valueOf(System.currentTimeMillis()));
super.execute(command);
}
public void execute(Runnable command, String taskName) {
runnableNameMap.put("class", command.getClass().getSimpleName());
runnableNameMap.put("task", taskName);
runnableNameMap.put("start_time", String.valueOf(System.currentTimeMillis()));
super.execute(command);
}
public Future<?> submit(Runnable command, String taskName) {
runnableNameMap.put("class", command.getClass().getSimpleName());
runnableNameMap.put("task", taskName);
runnableNameMap.put("start_time", String.valueOf(System.currentTimeMillis()));
return super.submit(command);
}
@Override
public Future<?> submit(Runnable command) {
runnableNameMap.put("class", command.getClass().getSimpleName());
runnableNameMap.put("task", defaultTaskName);
runnableNameMap.put("start_time", String.valueOf(System.currentTimeMillis()));
return super.submit(command);
}
public <T> Future<T> submit(Callable<T> task, String taskName) {
runnableNameMap.put("class", task.getClass().getSimpleName());
runnableNameMap.put("task", taskName);
runnableNameMap.put("start_time", String.valueOf(System.currentTimeMillis()));
return super.submit(task);
}
@Override
protected void beforeExecute(Thread t, Runnable r) {
// Snapshot the task metadata for the worker thread that is about to run it
transactionMap.put(t.getName(), new ConcurrentHashMap<>(runnableNameMap));
super.beforeExecute(t, r);
}
@Override
protected void afterExecute(Runnable r, Throwable t) {
super.afterExecute(r, t);
String threadName = Thread.currentThread().getName();
Map<String, String> runnableNameMap = transactionMap.get(threadName);
if (t != null) {
runnableNameMap.put("flag", "ERROR");
runnableNameMap.put("errorLog", t.toString());
} else {
runnableNameMap.put("flag", "SUCCESS");
}
runnableNameMap.put("end_time", String.valueOf(System.currentTimeMillis()));
}
public Map<String, Map> getTransactionMap() {
return transactionMap;
}
}
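For reference, a minimal usage sketch of the class above, assuming arbitrary pool sizes and an ordinary LinkedBlockingQueue (the resizable queue is introduced later); the task name "demoTask" is just an illustration:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DynamicThreadPoolExecutorDemo {
    public static void main(String[] args) throws InterruptedException {
        DynamicThreadPoolExecutor pool = new DynamicThreadPoolExecutor(
                2, 4, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(10));
        // The extra taskName argument ends up in the monitoring record
        pool.execute(() -> System.out.println("running on " + Thread.currentThread().getName()), "demoTask");
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
        // One record per worker thread: class, task, start_time, end_time, flag, ...
        System.out.println(pool.getTransactionMap());
    }
}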
Thread pool monitoring manager
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
/**
* [Dynamic thread pool manager]
*
* @author : [Administrator]
* @version : [v1.0]
* @createTime : [2023/2/12 22:47]
*/
public class DynamicThreadPoolExecutorManager {
/**
* Registry of dynamic thread pools (pool name -> pool)
*/
private static final Map<String, DynamicThreadPoolExecutor> threadPoolExecutorMap = new ConcurrentHashMap<>();
/**
* Create a thread pool and register it under the given name
*
* @param poolName thread pool name
* @param corePoolSize core pool size
* @param maximumPoolSize maximum pool size
* @param keepAliveTime keep-alive time
* @param timeUnit time unit
* @param queueSize work queue capacity
* @return the created DynamicThreadPoolExecutor
*/
public static DynamicThreadPoolExecutor createThreadPoolExecutor(String poolName, int corePoolSize, int maximumPoolSize,
long keepAliveTime, TimeUnit timeUnit, int queueSize) {
DynamicThreadPoolExecutor dynamicThreadPoolExecutor = new DynamicThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTime, timeUnit,
new ResizableCapacityLinkedBlockingQueue<>(queueSize));
threadPoolExecutorMap.put(poolName, dynamicThreadPoolExecutor);
return dynamicThreadPoolExecutor;
}
public static void refreshThreadPoolExecutor(String poolName, int corePoolSize, int maximumPoolSize,
long keepAliveTime, TimeUnit timeUnit, int queueSize) {
DynamicThreadPoolExecutor dynamicThreadPoolExecutor = getDynamicThreadPoolExecutor(poolName);
dynamicThreadPoolExecutor.setCorePoolSize(corePoolSize);
dynamicThreadPoolExecutor.setMaximumPoolSize(maximumPoolSize);
dynamicThreadPoolExecutor.setKeepAliveTime(keepAliveTime, timeUnit);
BlockingQueue<Runnable> queue = dynamicThreadPoolExecutor.getQueue();
// If the work queue is resizable, adjust its capacity as well
if (queue instanceof ResizableCapacityLinkedBlockingQueue) {
((ResizableCapacityLinkedBlockingQueue<Runnable>) queue).setCapacity(queueSize);
}
}
/**
* Print the pool's core parameters
*
* @param poolName thread pool name
*/
public static void printThreadPoolExecutor(String poolName) {
DynamicThreadPoolExecutor executor = getDynamicThreadPoolExecutor(poolName);
System.out.println("poolName:" + poolName + ", coreSize:" + executor.getCorePoolSize() + ", maximumPoolSize:" + executor.getMaximumPoolSize());
}
/**
* Print the pool's runtime status
*
* @param poolName thread pool name
*/
public static void printThreadPoolExecutorStatus(String poolName) {
DynamicThreadPoolExecutor executor = getDynamicThreadPoolExecutor(poolName);
StringBuilder sb = new StringBuilder();
sb.append("activeCount:").append(executor.getActiveCount())
.append(",completedTaskCount:").append(executor.getCompletedTaskCount())
.append(",largestPoolSize:").append(executor.getLargestPoolSize())
.append(",taskCount:").append(executor.getTaskCount())
.append(",waitTaskCount:").append(executor.getQueue().size());
System.out.println(sb);
}
/**
* Get the specified thread pool from the registry
*
* @param poolName thread pool name
* @return the registered DynamicThreadPoolExecutor
*/
public static DynamicThreadPoolExecutor getDynamicThreadPoolExecutor(String poolName) {
DynamicThreadPoolExecutor dynamicThreadPoolExecutor = getThreadPoolExecutorMap().get(poolName);
if (dynamicThreadPoolExecutor == null) {
throw new NullPointerException("### thread pool not found, poolName:" + poolName);
}
return dynamicThreadPoolExecutor;
}
public static Map<String, DynamicThreadPoolExecutor> getThreadPoolExecutorMap() {
return threadPoolExecutorMap;
}
}
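How the configuration platform pushes changes depends on which config center is used (Nacos, Apollo, ZooKeeper, and so on), which the article leaves open. The listener below is therefore only a hypothetical sketch of where a config-change callback would call refreshThreadPoolExecutor; ThreadPoolConfig and onConfigChanged are made-up names, not a real client API.

import java.util.concurrent.TimeUnit;

public class ThreadPoolConfigListener {
    /** Plain value object mirroring the tunable parameters (hypothetical). */
    static class ThreadPoolConfig {
        String poolName;
        int corePoolSize;
        int maximumPoolSize;
        long keepAliveSeconds;
        int queueSize;
    }

    /** Invoked by the (hypothetical) config-center client whenever the configuration changes. */
    public void onConfigChanged(ThreadPoolConfig cfg) {
        DynamicThreadPoolExecutorManager.refreshThreadPoolExecutor(
                cfg.poolName,
                cfg.corePoolSize,
                cfg.maximumPoolSize,
                cfg.keepAliveSeconds,
                TimeUnit.SECONDS,
                cfg.queueSize);
        // Print the new parameters so the change is visible in the logs
        DynamicThreadPoolExecutorManager.printThreadPoolExecutor(cfg.poolName);
    }
}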
Custom resizable blocking queue
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
/**
* A blocking queue whose capacity can be changed at runtime (based on LinkedBlockingQueue)
* @param <E> the type of elements held in this queue
*/
public class ResizableCapacityLinkedBlockingQueue<E> extends AbstractQueue<E>
implements BlockingQueue<E>, java.io.Serializable {
private static final long serialVersionUID = -6903933977591709194L;
/*
* A variant of the "two lock queue" algorithm. The putLock gates
* entry to put (and offer), and has an associated condition for
* waiting puts. Similarly for the takeLock. The "count" field
* that they both rely on is maintained as an atomic to avoid
* needing to get both locks in most cases. Also, to minimize need
* for puts to get takeLock and vice-versa, cascading notifies are
* used. When a put notices that it has enabled at least one take,
* it signals taker. That taker in turn signals others if more
* items have been entered since the signal. And symmetrically for
* takes signalling puts. Operations such as remove(Object) and
* iterators acquire both locks.
*
* Visibility between writers and readers is provided as follows:
*
* Whenever an element is enqueued, the putLock is acquired and
* count updated. A subsequent reader guarantees visibility to the
* enqueued Node by either acquiring the putLock (via fullyLock)
* or by acquiring the takeLock, and then reading n = count.get();
* this gives visibility to the first n items.
*
* To implement weakly consistent iterators, it appears we need to
* keep all Nodes GC-reachable from a predecessor dequeued Node.
* That would cause two problems:
* - allow a rogue Iterator to cause unbounded memory retention
* - cause cross-generational linking of old Nodes to new Nodes if
* a Node was tenured while live, which generational GCs have a
* hard time dealing with, causing repeated major collections.
* However, only non-deleted Nodes need to be reachable from
* dequeued Nodes, and reachability does not necessarily have to
* be of the kind understood by the GC. We use the trick of
* linking a Node that has just been dequeued to itself. Such a
* self-link implicitly means to advance to head.next.
*/
/**
* Linked list node class
*/
static class Node<E> {
E item;
/**
* One of:
* - the real successor Node
* - this Node, meaning the successor is head.next
* - null, meaning there is no successor (this is the last node)
*/
ResizableCapacityLinkedBlockingQueue.Node<E> next;
Node(E x) { item = x; }
}
private ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
/** The capacity bound, or Integer.MAX_VALUE if none */
private volatile int capacity;
/** Current number of elements */
private final AtomicInteger count = new AtomicInteger();
/**
* Head of linked list.
* Invariant: head.item == null
*/
transient ResizableCapacityLinkedBlockingQueue.Node<E> head;
/**
* Tail of linked list.
* Invariant: last.next == null
*/
private transient ResizableCapacityLinkedBlockingQueue.Node<E> last;
/** Lock held by take, poll, etc */
private final ReentrantLock takeLock = new ReentrantLock();
/** Wait queue for waiting takes */
private final Condition notEmpty = takeLock.newCondition();
/** Lock held by put, offer, etc */
private final ReentrantLock putLock = new ReentrantLock();
/** Wait queue for waiting puts */
private final Condition notFull = putLock.newCondition();
/**
* Signals a waiting take. Called only from put/offer (which do not
* otherwise ordinarily lock takeLock.)
*/
private void signalNotEmpty() {
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
notEmpty.signal();
} finally {
takeLock.unlock();
}
}
/**
* Signals a waiting put. Called only from take/poll.
*/
private void signalNotFull() {
final ReentrantLock putLock = this.putLock;
putLock.lock();
try {
notFull.signal();
} finally {
putLock.unlock();
}
}
/**
* Links node at end of queue.
*
* @param node the node
*/
private void enqueue(ResizableCapacityLinkedBlockingQueue.Node<E> node) {
// assert putLock.isHeldByCurrentThread();
// assert last.next == null;
last = last.next = node;
}
/**
* Removes a node from head of queue.
*
* @return the node
*/
private E dequeue() {
// assert takeLock.isHeldByCurrentThread();
// assert head.item == null;
ResizableCapacityLinkedBlockingQueue.Node<E> h = head;
ResizableCapacityLinkedBlockingQueue.Node<E> first = h.next;
h.next = h; // help GC
head = first;
E x = first.item;
first.item = null;
return x;
}
/**
* Locks to prevent both puts and takes.
*/
void fullyLock() {
putLock.lock();
takeLock.lock();
}
/**
* Unlocks to allow both puts and takes.
*/
void fullyUnlock() {
takeLock.unlock();
putLock.unlock();
}
// /**
// * Tells whether both locks are held by current thread.
// */
// boolean isFullyLocked() {
// return (putLock.isHeldByCurrentThread() &&
// takeLock.isHeldByCurrentThread());
// }
/**
* Creates a {@code ResizableCapacityLinkedBlockingQueue} with a capacity of
* {@link Integer#MAX_VALUE}.
*/
public ResizableCapacityLinkedBlockingQueue() {
this(Integer.MAX_VALUE);
}
/**
* Creates a {@code ResizableCapacityLinkedBlockingQueue} with the given initial
* capacity; the capacity can later be changed via {@code setCapacity(int)}.
*
* @param capacity the initial capacity of this queue
* @throws IllegalArgumentException if {@code capacity} is not greater
* than zero
*/
public ResizableCapacityLinkedBlockingQueue(int capacity) {
if (capacity <= 0) throw new IllegalArgumentException();
this.capacity = capacity;
last = head = new ResizableCapacityLinkedBlockingQueue.Node<E>(null);
}
/**
* Creates a {@code ResizableCapacityLinkedBlockingQueue} with a capacity of
* {@link Integer#MAX_VALUE}, initially containing the elements of the
* given collection,
* added in traversal order of the collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public ResizableCapacityLinkedBlockingQueue(Collection<? extends E> c) {
this(Integer.MAX_VALUE);
final ReentrantLock putLock = this.putLock;
putLock.lock(); // Never contended, but necessary for visibility
try {
int n = 0;
for (E e : c) {
if (e == null)
throw new NullPointerException();
if (n == getCapacity())
throw new IllegalStateException("Queue full");
enqueue(new ResizableCapacityLinkedBlockingQueue.Node<E>(e));
++n;
}
count.set(n);
} finally {
putLock.unlock();
}
}
// this doc comment is overridden to remove the reference to collections
// greater in size than Integer.MAX_VALUE
/**
* Returns the number of elements in this queue.
*
* @return the number of elements in this queue
*/
public int size() {
return count.get();
}
// this doc comment is a modified copy of the inherited doc comment,
// without the reference to unlimited queues.
/**
* Returns the number of additional elements that this queue can ideally
* (in the absence of memory or resource constraints) accept without
* blocking. This is always equal to the current capacity of this queue
* less the current {@code size} of this queue.
*
* <p>Note that you <em>cannot</em> always tell if an attempt to insert
* an element will succeed by inspecting {@code remainingCapacity}
* because it may be the case that another thread is about to
* insert or remove an element.
*/
public int remainingCapacity() {
return getCapacity() - count.get();
}
/**
* Inserts the specified element at the tail of this queue, waiting if
* necessary for space to become available.
*
* @throws InterruptedException {@inheritDoc}
* @throws NullPointerException {@inheritDoc}
*/
public void put(E e) throws InterruptedException {
if (e == null) throw new NullPointerException();
// Note: convention in all put/take/etc is to preset local var
// holding count negative to indicate failure unless set.
int c = -1;
ResizableCapacityLinkedBlockingQueue.Node<E> node = new ResizableCapacityLinkedBlockingQueue.Node<E>(e);
final ReentrantLock putLock = this.putLock;
final AtomicInteger count = this.count;
putLock.lockInterruptibly();
try {
/*
* Note that count is used in wait guard even though it is
* not protected by lock. This works because count can
* only decrease at this point (all other puts are shut
* out by lock), and we (or some other waiting put) are
* signalled if it ever changes from capacity. Similarly
* for all other uses of count in other wait guards.
*/
while (count.get() == getCapacity()) {
notFull.await();
}
enqueue(node);
c = count.getAndIncrement();
if (c + 1 < getCapacity())
notFull.signal();
} finally {
putLock.unlock();
}
if (c == 0)
signalNotEmpty();
}
/**
* Inserts the specified element at the tail of this queue, waiting if
* necessary up to the specified wait time for space to become available.
*
* @return {@code true} if successful, or {@code false} if
* the specified waiting time elapses before space is available
* @throws InterruptedException {@inheritDoc}
* @throws NullPointerException {@inheritDoc}
*/
public boolean offer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
if (e == null) throw new NullPointerException();
long nanos = unit.toNanos(timeout);
int c = -1;
final ReentrantLock putLock = this.putLock;
final AtomicInteger count = this.count;
putLock.lockInterruptibly();
try {
while (count.get() == getCapacity()) {
if (nanos <= 0)
return false;
nanos = notFull.awaitNanos(nanos);
}
enqueue(new ResizableCapacityLinkedBlockingQueue.Node<E>(e));
c = count.getAndIncrement();
if (c + 1 < getCapacity())
notFull.signal();
} finally {
putLock.unlock();
}
if (c == 0)
signalNotEmpty();
return true;
}
/**
* Inserts the specified element at the tail of this queue if it is
* possible to do so immediately without exceeding the queue's capacity,
* returning {@code true} upon success and {@code false} if this queue
* is full.
* When using a capacity-restricted queue, this method is generally
* preferable to method {@link BlockingQueue#add add}, which can fail to
* insert an element only by throwing an exception.
*
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
if (e == null) throw new NullPointerException();
final AtomicInteger count = this.count;
if (count.get() == getCapacity())
return false;
int c = -1;
ResizableCapacityLinkedBlockingQueue.Node<E> node = new ResizableCapacityLinkedBlockingQueue.Node<E>(e);
final ReentrantLock putLock = this.putLock;
putLock.lock();
try {
if (count.get() < getCapacity()) {
enqueue(node);
c = count.getAndIncrement();
if (c + 1 < getCapacity())
notFull.signal();
}
} finally {
putLock.unlock();
}
if (c == 0)
signalNotEmpty();
return c >= 0;
}
public E take() throws InterruptedException {
E x;
int c = -1;
final AtomicInteger count = this.count;
final ReentrantLock takeLock = this.takeLock;
takeLock.lockInterruptibly();
try {
while (count.get() == 0) {
notEmpty.await();
}
x = dequeue();
c = count.getAndDecrement();
if (c > 1)
notEmpty.signal();
} finally {
takeLock.unlock();
}
if (c == getCapacity())
signalNotFull();
return x;
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
E x = null;
int c = -1;
long nanos = unit.toNanos(timeout);
final AtomicInteger count = this.count;
final ReentrantLock takeLock = this.takeLock;
takeLock.lockInterruptibly();
try {
while (count.get() == 0) {
if (nanos <= 0)
return null;
nanos = notEmpty.awaitNanos(nanos);
}
x = dequeue();
c = count.getAndDecrement();
if (c > 1)
notEmpty.signal();
} finally {
takeLock.unlock();
}
if (c == getCapacity())
signalNotFull();
return x;
}
public E poll() {
final AtomicInteger count = this.count;
if (count.get() == 0)
return null;
E x = null;
int c = -1;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
if (count.get() > 0) {
x = dequeue();
c = count.getAndDecrement();
if (c > 1)
notEmpty.signal();
}
} finally {
takeLock.unlock();
}
if (c == getCapacity())
signalNotFull();
return x;
}
public E peek() {
if (count.get() == 0)
return null;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
ResizableCapacityLinkedBlockingQueue.Node<E> first = head.next;
if (first == null)
return null;
else
return first.item;
} finally {
takeLock.unlock();
}
}
/**
* Unlinks interior Node p with predecessor trail.
*/
void unlink(ResizableCapacityLinkedBlockingQueue.Node<E> p, ResizableCapacityLinkedBlockingQueue.Node<E> trail) {
// assert isFullyLocked();
// p.next is not changed, to allow iterators that are
// traversing p to maintain their weak-consistency guarantee.
p.item = null;
trail.next = p.next;
if (last == p)
last = trail;
if (count.getAndDecrement() == getCapacity())
notFull.signal();
}
/**
* Removes a single instance of the specified element from this queue,
* if it is present. More formally, removes an element {@code e} such
* that {@code o.equals(e)}, if this queue contains one or more such
* elements.
* Returns {@code true} if this queue contained the specified element
* (or equivalently, if this queue changed as a result of the call).
*
* @param o element to be removed from this queue, if present
* @return {@code true} if this queue changed as a result of the call
*/
public boolean remove(Object o) {
if (o == null) return false;
fullyLock();
try {
for (ResizableCapacityLinkedBlockingQueue.Node<E> trail = head, p = trail.next;
p != null;
trail = p, p = p.next) {
if (o.equals(p.item)) {
unlink(p, trail);
return true;
}
}
return false;
} finally {
fullyUnlock();
}
}
/**
* Returns {@code true} if this queue contains the specified element.
* More formally, returns {@code true} if and only if this queue contains
* at least one element {@code e} such that {@code o.equals(e)}.
*
* @param o object to be checked for containment in this queue
* @return {@code true} if this queue contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
fullyLock();
try {
for (ResizableCapacityLinkedBlockingQueue.Node<E> p = head.next; p != null; p = p.next)
if (o.equals(p.item))
return true;
return false;
} finally {
fullyUnlock();
}
}
/**
* Returns an array containing all of the elements in this queue, in
* proper sequence.
*
* <p>The returned array will be "safe" in that no references to it are
* maintained by this queue. (In other words, this method must allocate
* a new array). The caller is thus free to modify the returned array.
*
* <p>This method acts as bridge between array-based and collection-based
* APIs.
*
* @return an array containing all of the elements in this queue
*/
public Object[] toArray() {
fullyLock();
try {
int size = count.get();
Object[] a = new Object[size];
int k = 0;
for (ResizableCapacityLinkedBlockingQueue.Node<E> p = head.next; p != null; p = p.next)
a[k++] = p.item;
return a;
} finally {
fullyUnlock();
}
}
/**
* Returns an array containing all of the elements in this queue, in
* proper sequence; the runtime type of the returned array is that of
* the specified array. If the queue fits in the specified array, it
* is returned therein. Otherwise, a new array is allocated with the
* runtime type of the specified array and the size of this queue.
*
* <p>If this queue fits in the specified array with room to spare
* (i.e., the array has more elements than this queue), the element in
* the array immediately following the end of the queue is set to
* {@code null}.
*
* <p>Like the {@link #toArray()} method, this method acts as bridge between
* array-based and collection-based APIs. Further, this method allows
* precise control over the runtime type of the output array, and may,
* under certain circumstances, be used to save allocation costs.
*
* <p>Suppose {@code x} is a queue known to contain only strings.
* The following code can be used to dump the queue into a newly
* allocated array of {@code String}:
*
* <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
* Note that {@code toArray(new Object[0])} is identical in function to
* {@code toArray()}.
*
* @param a the array into which the elements of the queue are to
* be stored, if it is big enough; otherwise, a new array of the
* same runtime type is allocated for this purpose
* @return an array containing all of the elements in this queue
* @throws ArrayStoreException if the runtime type of the specified array
* is not a supertype of the runtime type of every element in
* this queue
* @throws NullPointerException if the specified array is null
*/
@SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
fullyLock();
try {
int size = count.get();
if (a.length < size)
a = (T[])java.lang.reflect.Array.newInstance
(a.getClass().getComponentType(), size);
int k = 0;
for (ResizableCapacityLinkedBlockingQueue.Node<E> p = head.next; p != null; p = p.next)
a[k++] = (T)p.item;
if (a.length > k)
a[k] = null;
return a;
} finally {
fullyUnlock();
}
}
public String toString() {
fullyLock();
try {
ResizableCapacityLinkedBlockingQueue.Node<E> p = head.next;
if (p == null)
return "[]";
StringBuilder sb = new StringBuilder();
sb.append('[');
for (;;) {
E e = p.item;
sb.append(e == this ? "(this Collection)" : e);
p = p.next;
if (p == null)
return sb.append(']').toString();
sb.append(',').append(' ');
}
} finally {
fullyUnlock();
}
}
/**
* Atomically removes all of the elements from this queue.
* The queue will be empty after this call returns.
*/
public void clear() {
fullyLock();
try {
for (ResizableCapacityLinkedBlockingQueue.Node<E> p, h = head; (p = h.next) != null; h = p) {
h.next = h;
p.item = null;
}
head = last;
// assert head.item == null && head.next == null;
if (count.getAndSet(0) == getCapacity())
notFull.signal();
} finally {
fullyUnlock();
}
}
/**
* @throws UnsupportedOperationException {@inheritDoc}
* @throws ClassCastException {@inheritDoc}
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c) {
return drainTo(c, Integer.MAX_VALUE);
}
/**
* @throws UnsupportedOperationException {@inheritDoc}
* @throws ClassCastException {@inheritDoc}
* @throws NullPointerException {@inheritDoc}
* @throws IllegalArgumentException {@inheritDoc}
*/
public int drainTo(Collection<? super E> c, int maxElements) {
if (c == null)
throw new NullPointerException();
if (c == this)
throw new IllegalArgumentException();
if (maxElements <= 0)
return 0;
boolean signalNotFull = false;
final ReentrantLock takeLock = this.takeLock;
takeLock.lock();
try {
int n = Math.min(maxElements, count.get());
// count.get provides visibility to first n Nodes
ResizableCapacityLinkedBlockingQueue.Node<E> h = head;
int i = 0;
try {
while (i < n) {
ResizableCapacityLinkedBlockingQueue.Node<E> p = h.next;
c.add(p.item);
p.item = null;
h.next = h;
h = p;
++i;
}
return n;
} finally {
// Restore invariants even if c.add() threw
if (i > 0) {
// assert h.item == null;
head = h;
signalNotFull = (count.getAndAdd(-i) == getCapacity());
}
}
} finally {
takeLock.unlock();
if (signalNotFull)
signalNotFull();
}
}
/**
* Returns an iterator over the elements in this queue in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
*
* @return an iterator over the elements in this queue in proper sequence
*/
public Iterator<E> iterator() {
return new ResizableCapacityLinkedBlockingQueue.Itr();
}
private class Itr implements Iterator<E> {
/*
* Basic weakly-consistent iterator. At all times hold the next
* item to hand out so that if hasNext() reports true, we will
* still have it to return even if lost race with a take etc.
*/
private ResizableCapacityLinkedBlockingQueue.Node<E> current;
private ResizableCapacityLinkedBlockingQueue.Node<E> lastRet;
private E currentElement;
Itr() {
fullyLock();
try {
current = head.next;
if (current != null)
currentElement = current.item;
} finally {
fullyUnlock();
}
}
public boolean hasNext() {
return current != null;
}
/**
* Returns the next live successor of p, or null if no such.
*
* Unlike other traversal methods, iterators need to handle both:
* - dequeued nodes (p.next == p)
* - (possibly multiple) interior removed nodes (p.item == null)
*/
private ResizableCapacityLinkedBlockingQueue.Node<E> nextNode(ResizableCapacityLinkedBlockingQueue.Node<E> p) {
for (;;) {
ResizableCapacityLinkedBlockingQueue.Node<E> s = p.next;
if (s == p)
return head.next;
if (s == null || s.item != null)
return s;
p = s;
}
}
public E next() {
fullyLock();
try {
if (current == null)
throw new NoSuchElementException();
E x = currentElement;
lastRet = current;
current = nextNode(current);
currentElement = (current == null) ? null : current.item;
return x;
} finally {
fullyUnlock();
}
}
public void remove() {
if (lastRet == null)
throw new IllegalStateException();
fullyLock();
try {
ResizableCapacityLinkedBlockingQueue.Node<E> node = lastRet;
lastRet = null;
for (ResizableCapacityLinkedBlockingQueue.Node<E> trail = head, p = trail.next;
p != null;
trail = p, p = p.next) {
if (p == node) {
unlink(p, trail);
break;
}
}
} finally {
fullyUnlock();
}
}
}
/** A customized variant of Spliterators.IteratorSpliterator */
static final class LBQSpliterator<E> implements Spliterator<E> {
static final int MAX_BATCH = 1 << 25; // max batch array size;
final ResizableCapacityLinkedBlockingQueue<E> queue;
ResizableCapacityLinkedBlockingQueue.Node<E> current; // current node; null until initialized
int batch; // batch size for splits
boolean exhausted; // true when no more nodes
long est; // size estimate
LBQSpliterator(ResizableCapacityLinkedBlockingQueue<E> queue) {
this.queue = queue;
this.est = queue.size();
}
public long estimateSize() { return est; }
public Spliterator<E> trySplit() {
ResizableCapacityLinkedBlockingQueue.Node<E> h;
final ResizableCapacityLinkedBlockingQueue<E> q = this.queue;
int b = batch;
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1;
if (!exhausted &&
((h = current) != null || (h = q.head.next) != null) &&
h.next != null) {
Object[] a = new Object[n];
int i = 0;
ResizableCapacityLinkedBlockingQueue.Node<E> p = current;
q.fullyLock();
try {
if (p != null || (p = q.head.next) != null) {
do {
if ((a[i] = p.item) != null)
++i;
} while ((p = p.next) != null && i < n);
}
} finally {
q.fullyUnlock();
}
if ((current = p) == null) {
est = 0L;
exhausted = true;
}
else if ((est -= i) < 0L)
est = 0L;
if (i > 0) {
batch = i;
return Spliterators.spliterator
(a, 0, i, Spliterator.ORDERED | Spliterator.NONNULL |
Spliterator.CONCURRENT);
}
}
return null;
}
public void forEachRemaining(Consumer<? super E> action) {
if (action == null) throw new NullPointerException();
final ResizableCapacityLinkedBlockingQueue<E> q = this.queue;
if (!exhausted) {
exhausted = true;
ResizableCapacityLinkedBlockingQueue.Node<E> p = current;
do {
E e = null;
q.fullyLock();
try {
if (p == null)
p = q.head.next;
while (p != null) {
e = p.item;
p = p.next;
if (e != null)
break;
}
} finally {
q.fullyUnlock();
}
if (e != null)
action.accept(e);
} while (p != null);
}
}
public boolean tryAdvance(Consumer<? super E> action) {
if (action == null) throw new NullPointerException();
final ResizableCapacityLinkedBlockingQueue<E> q = this.queue;
if (!exhausted) {
E e = null;
q.fullyLock();
try {
if (current == null)
current = q.head.next;
while (current != null) {
e = current.item;
current = current.next;
if (e != null)
break;
}
} finally {
q.fullyUnlock();
}
if (current == null)
exhausted = true;
if (e != null) {
action.accept(e);
return true;
}
}
return false;
}
public int characteristics() {
return Spliterator.ORDERED | Spliterator.NONNULL |
Spliterator.CONCURRENT;
}
}
/**
* Returns a {@link Spliterator} over the elements in this queue.
*
* <p>The returned spliterator is
* <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
*
* <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
* {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
*
* @implNote
* The {@code Spliterator} implements {@code trySplit} to permit limited
* parallelism.
*
* @return a {@code Spliterator} over the elements in this queue
* @since 1.8
*/
public Spliterator<E> spliterator() {
return new ResizableCapacityLinkedBlockingQueue.LBQSpliterator<E>(this);
}
/**
* Saves this queue to a stream (that is, serializes it).
*
* @param s the stream
* @throws java.io.IOException if an I/O error occurs
* @serialData The capacity is emitted (int), followed by all of
* its elements (each an {@code Object}) in the proper order,
* followed by a null
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
fullyLock();
try {
// Write out any hidden stuff, plus capacity
s.defaultWriteObject();
// Write out all elements in the proper order.
for (ResizableCapacityLinkedBlockingQueue.Node<E> p = head.next; p != null; p = p.next)
s.writeObject(p.item);
// Use trailing null as sentinel
s.writeObject(null);
} finally {
fullyUnlock();
}
}
/**
* Reconstitutes this queue from a stream (that is, deserializes it).
* @param s the stream
* @throws ClassNotFoundException if the class of a serialized object
* could not be found
* @throws java.io.IOException if an I/O error occurs
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
// Read in capacity, and any hidden stuff
s.defaultReadObject();
count.set(0);
last = head = new ResizableCapacityLinkedBlockingQueue.Node<E>(null);
// Read in all elements and place in queue
for (;;) {
@SuppressWarnings("unchecked")
E item = (E)s.readObject();
if (item == null)
break;
add(item);
}
}
public int getCapacity() {
readWriteLock.readLock().lock();
try {
return capacity;
} finally {
readWriteLock.readLock().unlock();
}
}
public void setCapacity(int capacity) {
if (capacity <= 0) throw new IllegalArgumentException();
int oldCapacity;
readWriteLock.writeLock().lock();
try {
oldCapacity = this.capacity;
this.capacity = capacity;
} finally {
readWriteLock.writeLock().unlock();
}
// If the capacity grew, wake up a producer that may be blocked waiting for space
// (done after releasing the write lock to avoid lock-ordering issues with putLock)
if (capacity > oldCapacity) {
signalNotFull();
}
}
}
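A small standalone sketch (not part of the article's test class) shows the resizable capacity in action: a timed offer fails while the queue is full at its old capacity and succeeds after setCapacity enlarges it.

import java.util.concurrent.TimeUnit;

public class ResizableQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        ResizableCapacityLinkedBlockingQueue<String> queue =
                new ResizableCapacityLinkedBlockingQueue<>(2);
        queue.put("a");
        queue.put("b");
        // The queue is full at capacity 2, so a timed offer gives up after 100 ms.
        System.out.println("offer before resize: " + queue.offer("c", 100, TimeUnit.MILLISECONDS));

        queue.setCapacity(4);
        // With the larger capacity the same offer now succeeds.
        System.out.println("offer after resize:  " + queue.offer("c", 100, TimeUnit.MILLISECONDS));
        System.out.println("remainingCapacity = " + queue.remainingCapacity());
    }
}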
Test class
import java.util.concurrent.TimeUnit;
/**
* [Custom thread pool test]
*
* @author : [Administrator]
* @version : [v1.0]
* @createTime : [2023/2/12 23:31]
*/
public class DynamicThreadPoolExecutorTest {
public static void main(String[] args) throws Exception{
DynamicThreadPoolExecutor johnThreadPool = DynamicThreadPoolExecutorManager.createThreadPoolExecutor("johnThreadPool",
10, 20, 0L, TimeUnit.SECONDS, 10);
DynamicThreadPoolExecutorManager.printThreadPoolExecutor("johnThreadPool");
DynamicThreadPoolExecutorManager.printThreadPoolExecutorStatus("johnThreadPool");
for (int i = 0; i < 30; i++) {
johnThreadPool.execute(()->{
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
},"getArticle");
}
Thread.sleep(5000);
System.out.println("沉睡了5s----------------------------------");
DynamicThreadPoolExecutorManager.refreshThreadPoolExecutor("johnThreadPool",
20,30,0L,TimeUnit.SECONDS,10);
DynamicThreadPoolExecutorManager.printThreadPoolExecutor("johnThreadPool");
DynamicThreadPoolExecutorManager.printThreadPoolExecutorStatus("johnThreadPool");
System.out.println("log:"+johnThreadPool.getTransactionMap());
}
}
Test results
Special thanks to King老师, whose video and documentation explain this implementation of the dynamic thread pool.