From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Sun, 23 Jan 2022 22:58:11 -0800
Subject: [PATCH] ConcurrentUtil


diff --git a/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
new file mode 100644
index 0000000000000000000000000000000000000000..f84a622dc29750139ac280f480b7cd132b036287
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
@@ -0,0 +1,1421 @@
+package ca.spottedleaf.concurrentutil.collection;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Validate;
|
|
+
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.ArrayList;
|
|
+import java.util.Collection;
|
|
+import java.util.Iterator;
|
|
+import java.util.List;
|
|
+import java.util.NoSuchElementException;
|
|
+import java.util.Queue;
|
|
+import java.util.Spliterator;
|
|
+import java.util.Spliterators;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.IntFunction;
|
|
+import java.util.function.Predicate;
|
|
+
|
|
+/**
|
|
+ * MT-Safe linked first-in-first-out (FIFO) ordered queue.
|
|
+ *
|
|
+ * This queue should out-perform {@link java.util.concurrent.ConcurrentLinkedQueue} under high-contention reads/writes, and
+ * should be no slower under low contention.
|
|
+ * <p>
|
|
+ * Note that this queue breaks the specification laid out by {@link Collection}, see {@link #preventAdds()} and {@link Collection#add(Object)}.
|
|
+ * </p>
|
|
+ * <p><b>
|
|
+ * This queue will only unlink linked nodes through the {@link #peek()} and {@link #poll()} methods, and this is only if
|
|
+ * they are at the head of the queue.
|
|
+ * </b></p>
|
|
+ * @param <E> Type of element in this queue.
|
|
+ */
|
|
+public class MultiThreadedQueue<E> implements Queue<E> {
|
|
+
|
|
+ protected volatile LinkedNode<E> head; /* Always non-null, high chance of being the actual head */
|
|
+
|
|
+ protected volatile LinkedNode<E> tail; /* Always non-null, high chance of being the actual tail */
|
|
+
|
|
+ /* Note that it is possible to reach head from tail. */
|
|
+
|
|
+ /* IMPL NOTE: Leave hashCode and equals to their defaults */
|
|
+
|
|
+ protected static final VarHandle HEAD_HANDLE = ConcurrentUtil.getVarHandle(MultiThreadedQueue.class, "head", LinkedNode.class);
|
|
+ protected static final VarHandle TAIL_HANDLE = ConcurrentUtil.getVarHandle(MultiThreadedQueue.class, "tail", LinkedNode.class);
|
|
+
|
|
+ /* head */
|
|
+
|
|
+ protected final void setHeadPlain(final LinkedNode<E> newHead) {
|
|
+ HEAD_HANDLE.set(this, newHead);
|
|
+ }
|
|
+
|
|
+ protected final void setHeadOpaque(final LinkedNode<E> newHead) {
|
|
+ HEAD_HANDLE.setOpaque(this, newHead);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getHeadPlain() {
|
|
+ return (LinkedNode<E>)HEAD_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getHeadOpaque() {
|
|
+ return (LinkedNode<E>)HEAD_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getHeadAcquire() {
|
|
+ return (LinkedNode<E>)HEAD_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ /* tail */
|
|
+
|
|
+ protected final void setTailPlain(final LinkedNode<E> newTail) {
|
|
+ TAIL_HANDLE.set(this, newTail);
|
|
+ }
|
|
+
|
|
+ protected final void setTailOpaque(final LinkedNode<E> newTail) {
|
|
+ TAIL_HANDLE.setOpaque(this, newTail);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getTailPlain() {
|
|
+ return (LinkedNode<E>)TAIL_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getTailOpaque() {
|
|
+ return (LinkedNode<E>)TAIL_HANDLE.getOpaque(this);
|
|
+ }
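+
+    /*
+     * Naming note: the Plain/Opaque/Acquire/Volatile suffixes on the accessors above and below mirror the
+     * corresponding VarHandle access modes (get/set, getOpaque/setOpaque, getAcquire/setRelease,
+     * getVolatile/setVolatile), i.e. progressively stronger ordering guarantees at progressively higher cost.
+     */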
|
|
+
|
|
+ /**
|
|
+ * Constructs a {@code MultiThreadedQueue}, initially empty.
|
|
+ * <p>
|
|
+ * The returned object may not be published without synchronization.
|
|
+ * </p>
|
|
+ */
|
|
+ public MultiThreadedQueue() {
|
|
+ final LinkedNode<E> value = new LinkedNode<>(null, null);
|
|
+ this.setHeadPlain(value);
|
|
+ this.setTailPlain(value);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs a {@code MultiThreadedQueue}, initially containing all elements in the specified {@code collection}.
|
|
+ * <p>
|
|
+ * The returned object may not be published without synchronization.
|
|
+ * </p>
|
|
+ * @param collection The specified collection.
|
|
+ * @throws NullPointerException If {@code collection} is {@code null} or contains {@code null} elements.
|
|
+ */
|
|
+ public MultiThreadedQueue(final Iterable<? extends E> collection) {
|
|
+ final Iterator<? extends E> elements = collection.iterator();
|
|
+
|
|
+ if (!elements.hasNext()) {
|
|
+ final LinkedNode<E> value = new LinkedNode<>(null, null);
|
|
+ this.setHeadPlain(value);
|
|
+ this.setTailPlain(value);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final LinkedNode<E> head = new LinkedNode<>(Validate.notNull(elements.next(), "Null element"), null);
|
|
+ LinkedNode<E> tail = head;
|
|
+
|
|
+ while (elements.hasNext()) {
|
|
+ final LinkedNode<E> next = new LinkedNode<>(Validate.notNull(elements.next(), "Null element"), null);
|
|
+ tail.setNextPlain(next);
|
|
+ tail = next;
|
|
+ }
|
|
+
|
|
+ this.setHeadPlain(head);
|
|
+ this.setTailPlain(tail);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public E remove() throws NoSuchElementException {
|
|
+ final E ret = this.poll();
|
|
+
|
|
+ if (ret == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ * <p>
|
|
+ * Contrary to the specification of {@link Collection#add}, this method will fail to add the element to this queue
|
|
+ * and return {@code false} if this queue is add-blocked.
|
|
+ * </p>
|
|
+ */
|
|
+ @Override
|
|
+ public boolean add(final E element) {
|
|
+ return this.offer(element);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds the specified element to the tail of this queue. If this queue is currently add-locked, then the queue is
|
|
+ * released from that lock and this element is added. The unlock operation and addition of the specified
|
|
+ * element is atomic.
|
|
+ * @param element The specified element.
|
|
+     * @return {@code true} if this queue was previously add-blocked and has now been unlocked, {@code false} otherwise
|
|
+ */
|
|
+ public boolean forceAdd(final E element) {
|
|
+ final LinkedNode<E> node = new LinkedNode<>(element, null);
|
|
+
|
|
+ return !this.forceAppendList(node, node);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public E element() throws NoSuchElementException {
|
|
+ final E ret = this.peek();
|
|
+
|
|
+ if (ret == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ * <p>
|
|
+ * This method may also return {@code false} to indicate an element was not added if this queue is add-blocked.
|
|
+ * </p>
|
|
+ */
|
|
+ @Override
|
|
+ public boolean offer(final E element) {
|
|
+ Validate.notNull(element, "Null element");
|
|
+
|
|
+ final LinkedNode<E> node = new LinkedNode<>(element, null);
|
|
+
|
|
+ return this.appendList(node, node);
|
|
+ }
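+
+    /*
+     * Illustrative usage sketch (the element type and the process() call below are placeholders): any number of
+     * producers may offer() while any number of consumers poll(); a null return from poll() only means the queue
+     * was observed empty, since null elements are rejected up front.
+     *
+     *   final MultiThreadedQueue<String> queue = new MultiThreadedQueue<>();
+     *
+     *   // producer threads
+     *   queue.offer("job-1");
+     *   queue.offer("job-2");
+     *
+     *   // consumer threads
+     *   String job;
+     *   while ((job = queue.poll()) != null) {
+     *       process(job);
+     *   }
+     */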
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public E peek() {
|
|
+ for (LinkedNode<E> head = this.getHeadOpaque(), curr = head;;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ if (this.getHeadOpaque() == head && curr != head) {
|
|
+ this.setHeadOpaque(curr);
|
|
+ }
|
|
+ return element;
|
|
+ }
|
|
+
|
|
+ if (next == null || curr == next) {
|
|
+ return null;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public E poll() {
|
|
+ return this.removeHead();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Retrieves and removes the head of this queue if it matches the specified predicate. If this queue is empty
|
|
+ * or the head does not match the predicate, this function returns {@code null}.
|
|
+ * <p>
|
|
+ * The predicate may be invoked multiple or no times in this call.
|
|
+ * </p>
|
|
+ * @param predicate The specified predicate.
|
|
+ * @return The head if it matches the predicate, or {@code null} if it did not or this queue is empty.
|
|
+ */
|
|
+ public E pollIf(final Predicate<E> predicate) {
|
|
+ return this.removeHead(Validate.notNull(predicate, "Null predicate"));
|
|
+ }
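+
+    /*
+     * Illustrative pollIf() sketch (Task and deadline() are assumed application types): only remove the head
+     * when it is actually due, otherwise leave it queued.
+     *
+     *   final Task next = queue.pollIf((final Task task) -> task.deadline() <= System.nanoTime());
+     *   // next == null: the queue was empty or the head was not yet due (the head stays in the queue)
+     */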
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void clear() {
|
|
+ //noinspection StatementWithEmptyBody
|
|
+ while (this.poll() != null);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Prevents elements from being added to this queue. Once this is called, any attempt to add to this queue will fail.
|
|
+ * <p>
|
|
+ * This function is MT-Safe.
|
|
+ * </p>
|
|
+ * @return {@code true} if the queue was modified to prevent additions, {@code false} if it already prevented additions.
|
|
+ */
|
|
+ public boolean preventAdds() {
|
|
+ final LinkedNode<E> deadEnd = new LinkedNode<>(null, null);
|
|
+ deadEnd.setNextPlain(deadEnd);
|
|
+
|
|
+ if (!this.appendList(deadEnd, deadEnd)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.setTailPlain(deadEnd); /* (try to) Ensure tail is set for the following #allowAdds call */
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Allows elements to be added to this queue once again. Note that this function has undefined behaviour if
|
|
+ * {@link #preventAdds()} is not called beforehand. The benefit of this function over {@link #tryAllowAdds()}
|
|
+ * is that this function might perform better.
|
|
+ * <p>
|
|
+ * This function is not MT-Safe.
|
|
+ * </p>
|
|
+ */
|
|
+ public void allowAdds() {
|
|
+ LinkedNode<E> tail = this.getTailPlain();
|
|
+
|
|
+        /* We need to walk to the true tail: the tail update in this.appendList is a guarded opaque write, not an atomic CAS */
|
|
+ /* Thus it is possible for an outdated tail to be set */
|
|
+ while (tail != (tail = tail.getNextPlain())) {}
|
|
+
|
|
+ tail.setNextVolatile(null);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+     * Tries to allow elements to be added to this queue. Returns {@code true} if the queue was previously add-locked,
|
|
+ * {@code false} otherwise.
|
|
+ * <p>
|
|
+ * This function is MT-Safe, however it should not be used with {@link #allowAdds()}.
|
|
+ * </p>
|
|
+ * @return {@code true} if the queue was previously add-locked, {@code false} otherwise.
|
|
+ */
|
|
+ public boolean tryAllowAdds() {
|
|
+ LinkedNode<E> tail = this.getTailPlain();
|
|
+
|
|
+ for (int failures = 0;;) {
|
|
+            /* We need to walk to the true tail: the tail update in this.appendList is a guarded opaque write, not an atomic CAS */
|
|
+ /* Thus it is possible for an outdated tail to be set */
|
|
+ while (tail != (tail = tail.getNextAcquire())) {
|
|
+ if (tail == null) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (tail == (tail = tail.compareExchangeNextVolatile(tail, null))) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (tail == null) {
|
|
+ return false;
|
|
+ }
|
|
+ ++failures;
|
|
+ }
|
|
+ }
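+
+    /*
+     * Illustrative add-block lifecycle sketch (a single coordinating thread is assumed for allowAdds()):
+     *
+     *   final MultiThreadedQueue<Runnable> queue = new MultiThreadedQueue<>();
+     *   queue.add(() -> {});                        // true: additions allowed
+     *   queue.preventAdds();                        // true: queue is now add-blocked
+     *   final boolean added = queue.add(() -> {});  // false: element rejected
+     *   queue.allowAdds();                          // single-threaded unlock; use tryAllowAdds() when racing
+     *   queue.add(() -> {});                        // true: accepted again
+     */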
|
|
+
|
|
+ /**
|
|
+ * Atomically adds the specified element to this queue or allows additions to the queue. If additions
|
|
+ * are not allowed, the element is not added.
|
|
+ * <p>
|
|
+ * This function is MT-Safe.
|
|
+ * </p>
|
|
+ * @param element The specified element.
|
|
+ * @return {@code true} if the queue now allows additions, {@code false} if the element was added.
|
|
+ */
|
|
+ public boolean addOrAllowAdds(final E element) {
|
|
+ Validate.notNull(element, "Null element");
|
|
+ int failures = 0;
|
|
+
|
|
+ final LinkedNode<E> append = new LinkedNode<>(element, null);
|
|
+
|
|
+ for (LinkedNode<E> currTail = this.getTailOpaque(), curr = currTail;;) {
|
|
+ /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
|
|
+ /* It is likely due to a cache miss caused by another write to the next field */
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (next == null) {
|
|
+ final LinkedNode<E> compared = curr.compareExchangeNextVolatile(null, append);
|
|
+
|
|
+ if (compared == null) {
|
|
+ /* Added */
|
|
+ /* Avoid CASing on tail more than we need to */
|
|
+ /* CAS to avoid setting an out-of-date tail */
|
|
+ if (this.getTailOpaque() == currTail) {
|
|
+ this.setTailOpaque(append);
|
|
+ }
|
|
+ return false; // we added
|
|
+ }
|
|
+
|
|
+ ++failures;
|
|
+ curr = compared;
|
|
+ continue;
|
|
+ } else if (next == curr) {
|
|
+ final LinkedNode<E> compared = curr.compareExchangeNextVolatile(curr, null);
|
|
+
|
|
+ if (compared == curr) {
|
|
+ return true; // we let additions through
|
|
+ }
|
|
+
|
|
+ ++failures;
|
|
+
|
|
+ if (compared != null) {
|
|
+ curr = compared;
|
|
+ }
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (curr == currTail) {
|
|
+ /* Tail is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to tail */
|
|
+ if (currTail == (currTail = this.getTailOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = currTail;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns whether this queue is currently add-blocked. That is, whether {@link #add(Object)} and friends will return {@code false}.
|
|
+ */
|
|
+ public boolean isAddBlocked() {
|
|
+ for (LinkedNode<E> tail = this.getTailOpaque();;) {
|
|
+ LinkedNode<E> next = tail.getNextVolatile();
|
|
+ if (next == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (next == tail) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ tail = next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Atomically removes the head from this queue if it exists, otherwise prevents additions to this queue if no
|
|
+ * head is removed.
|
|
+ * <p>
|
|
+ * This function is MT-Safe.
|
|
+ * </p>
|
|
+ * If the queue is already add-blocked and empty then no operation is performed.
|
|
+ * @return {@code null} if the queue is now add-blocked or was previously add-blocked, else returns
|
|
+     *         a non-null value which was the previous head of the queue.
|
|
+ */
|
|
+ public E pollOrBlockAdds() {
|
|
+ int failures = 0;
|
|
+ for (LinkedNode<E> head = this.getHeadOpaque(), curr = head;;) {
|
|
+ final E currentVal = curr.getElementVolatile();
|
|
+ final LinkedNode<E> next = curr.getNextOpaque();
|
|
+
|
|
+ if (next == curr) {
|
|
+ return null; /* Additions are already blocked */
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (currentVal != null) {
|
|
+ if (curr.getAndSetElementVolatile(null) == null) {
|
|
+ ++failures;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* "CAS" to avoid setting an out-of-date head */
|
|
+ if (this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(next != null ? next : curr);
|
|
+ }
|
|
+
|
|
+ return currentVal;
|
|
+ }
|
|
+
|
|
+ if (next == null) {
|
|
+ /* Try to update stale head */
|
|
+ if (curr != head && this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(curr);
|
|
+ }
|
|
+
|
|
+ final LinkedNode<E> compared = curr.compareExchangeNextVolatile(null, curr);
|
|
+
|
|
+ if (compared != null) {
|
|
+ // failed to block additions
|
|
+ curr = compared;
|
|
+ ++failures;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ return null; /* We blocked additions */
|
|
+ }
|
|
+
|
|
+ if (head == curr) {
|
|
+ /* head is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to head */
|
|
+ if (head == (head = this.getHeadOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = head;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
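+
+    /*
+     * Illustrative sketch of one way pollOrBlockAdds() and addOrAllowAdds() can be paired for an idle-sleeping
+     * consumer (park(), wakeConsumer() and someTask are assumed application hooks):
+     *
+     *   // consumer: drain until empty, atomically add-blocking the queue so producers can detect idleness
+     *   Runnable task;
+     *   while ((task = queue.pollOrBlockAdds()) != null) {
+     *       task.run();
+     *   }
+     *   park(); // queue is now add-blocked
+     *
+     *   // producer: a true return means the queue was add-blocked, so someTask was NOT added
+     *   if (queue.addOrAllowAdds(someTask)) {
+     *       queue.add(someTask); // additions are allowed again, re-submit
+     *       wakeConsumer();
+     *   }
+     */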
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean remove(final Object object) {
|
|
+ Validate.notNull(object, "Null object to remove");
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ if ((element == object || element.equals(object)) && curr.getAndSetElementVolatile(null) == element) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (next == curr || next == null) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean removeIf(final Predicate<? super E> filter) {
|
|
+ Validate.notNull(filter, "Null filter");
|
|
+
|
|
+ boolean ret = false;
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ ret |= filter.test(element) && curr.getAndSetElementVolatile(null) == element;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean removeAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ boolean ret = false;
|
|
+
|
|
+ /* Volatile is required to synchronize with the write to the first element */
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ ret |= collection.contains(element) && curr.getAndSetElementVolatile(null) == element;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean retainAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ boolean ret = false;
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ ret |= !collection.contains(element) && curr.getAndSetElementVolatile(null) == element;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public Object[] toArray() {
|
|
+ final List<E> ret = new ArrayList<>();
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ ret.add(element);
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret.toArray();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public <T> T[] toArray(final T[] array) {
|
|
+ final List<T> ret = new ArrayList<>();
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ //noinspection unchecked
|
|
+ ret.add((T)element);
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret.toArray(array);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public <T> T[] toArray(final IntFunction<T[]> generator) {
|
|
+ Validate.notNull(generator, "Null generator");
|
|
+
|
|
+ final List<T> ret = new ArrayList<>();
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ //noinspection unchecked
|
|
+ ret.add((T)element);
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return ret.toArray(generator);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ final StringBuilder builder = new StringBuilder();
|
|
+
|
|
+ builder.append("MultiThreadedQueue: {elements: {");
|
|
+
|
|
+ int deadEntries = 0;
|
|
+ int totalEntries = 0;
|
|
+ int aliveEntries = 0;
|
|
+
|
|
+ boolean addLocked = false;
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();; ++totalEntries) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element == null) {
|
|
+ ++deadEntries;
|
|
+ } else {
|
|
+ ++aliveEntries;
|
|
+ }
|
|
+
|
|
+ if (totalEntries != 0) {
|
|
+ builder.append(", ");
|
|
+ }
|
|
+
|
|
+ builder.append(totalEntries).append(": \"").append(element).append('"');
|
|
+
|
|
+ if (next == null) {
|
|
+ break;
|
|
+ }
|
|
+ if (curr == next) {
|
|
+ addLocked = true;
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ builder.append("}, total_entries: \"").append(totalEntries).append("\", alive_entries: \"").append(aliveEntries)
|
|
+                .append("\", dead_entries: \"").append(deadEntries).append("\", add_locked: \"").append(addLocked)
|
|
+ .append("\"}");
|
|
+
|
|
+ return builder.toString();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds all elements from the specified collection to this queue. The addition is atomic.
|
|
+ * @param collection The specified collection.
|
|
+ * @return {@code true} if all elements were added successfully, or {@code false} if this queue is add-blocked, or
|
|
+ * {@code false} if the specified collection contains no elements.
|
|
+ */
|
|
+ @Override
|
|
+ public boolean addAll(final Collection<? extends E> collection) {
|
|
+ return this.addAll((Iterable<? extends E>)collection);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds all elements from the specified iterable object to this queue. The addition is atomic.
|
|
+ * @param iterable The specified iterable object.
|
|
+ * @return {@code true} if all elements were added successfully, or {@code false} if this queue is add-blocked, or
|
|
+ * {@code false} if the specified iterable contains no elements.
|
|
+ */
|
|
+ public boolean addAll(final Iterable<? extends E> iterable) {
|
|
+ Validate.notNull(iterable, "Null iterable");
|
|
+
|
|
+ final Iterator<? extends E> elements = iterable.iterator();
|
|
+ if (!elements.hasNext()) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /* Build a list of nodes to append */
|
|
+        /* This is much faster because zero additional synchronization is performed while building the list */
|
|
+
|
|
+ final LinkedNode<E> head = new LinkedNode<>(Validate.notNull(elements.next(), "Null element"), null);
|
|
+ LinkedNode<E> tail = head;
|
|
+
|
|
+ while (elements.hasNext()) {
|
|
+ final LinkedNode<E> next = new LinkedNode<>(Validate.notNull(elements.next(), "Null element"), null);
|
|
+ tail.setNextPlain(next);
|
|
+ tail = next;
|
|
+ }
|
|
+
|
|
+ return this.appendList(head, tail);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds all of the elements from the specified array to this queue.
|
|
+ * @param items The specified array.
|
|
+ * @return {@code true} if all elements were added successfully, or {@code false} if this queue is add-blocked, or
|
|
+ * {@code false} if the specified array has a length of 0.
|
|
+ */
|
|
+ public boolean addAll(final E[] items) {
|
|
+ return this.addAll(items, 0, items.length);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds all of the elements from the specified array to this queue.
|
|
+ * @param items The specified array.
|
|
+ * @param off The offset in the array.
|
|
+ * @param len The number of items.
|
|
+ * @return {@code true} if all elements were added successfully, or {@code false} if this queue is add-blocked, or
|
|
+ * {@code false} if the specified array has a length of 0.
|
|
+ */
|
|
+ public boolean addAll(final E[] items, final int off, final int len) {
|
|
+ Validate.notNull(items, "Items may not be null");
|
|
+ Validate.arrayBounds(off, len, items.length, "Items array indices out of bounds");
|
|
+
|
|
+ if (len == 0) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final LinkedNode<E> head = new LinkedNode<>(Validate.notNull(items[off], "Null element"), null);
|
|
+ LinkedNode<E> tail = head;
|
|
+
|
|
+ for (int i = 1; i < len; ++i) {
|
|
+ final LinkedNode<E> next = new LinkedNode<>(Validate.notNull(items[off + i], "Null element"), null);
|
|
+ tail.setNextPlain(next);
|
|
+ tail = next;
|
|
+ }
|
|
+
|
|
+ return this.appendList(head, tail);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean containsAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ for (final Object element : collection) {
|
|
+ if (!this.contains(element)) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+        return true;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public Iterator<E> iterator() {
|
|
+ return new LinkedIterator<>(this.getHeadOpaque());
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ * <p>
|
|
+ * Note that this function is computed non-atomically and in O(n) time. The value returned may not be representative of
|
|
+ * the queue in its current state.
|
|
+ * </p>
|
|
+ */
|
|
+ @Override
|
|
+ public int size() {
|
|
+ int size = 0;
|
|
+
|
|
+ /* Volatile is required to synchronize with the write to the first element */
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ ++size;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return size;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean isEmpty() {
|
|
+ return this.peek() == null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean contains(final Object object) {
|
|
+ Validate.notNull(object, "Null object");
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null && (element == object || element.equals(object))) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Finds the first element in this queue that matches the predicate.
|
|
+ * @param predicate The predicate to test elements against.
|
|
+ * @return The first element that matched the predicate, {@code null} if none matched.
|
|
+ */
|
|
+ public E find(final Predicate<E> predicate) {
|
|
+ Validate.notNull(predicate, "Null predicate");
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null && predicate.test(element)) {
|
|
+ return element;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void forEach(final Consumer<? super E> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ for (LinkedNode<E> curr = this.getHeadOpaque();;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E element = curr.getElementPlain(); /* Likely in sync */
|
|
+
|
|
+ if (element != null) {
|
|
+ action.accept(element);
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // return true if normal addition, false if the queue previously disallowed additions
|
|
+ protected final boolean forceAppendList(final LinkedNode<E> head, final LinkedNode<E> tail) {
|
|
+ int failures = 0;
|
|
+
|
|
+ for (LinkedNode<E> currTail = this.getTailOpaque(), curr = currTail;;) {
|
|
+ /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
|
|
+ /* It is likely due to a cache miss caused by another write to the next field */
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ final LinkedNode<E> compared = curr.compareExchangeNextVolatile(next, head);
|
|
+
|
|
+ if (compared == next) {
|
|
+ /* Added */
|
|
+ /* Avoid CASing on tail more than we need to */
|
|
+ /* "CAS" to avoid setting an out-of-date tail */
|
|
+ if (this.getTailOpaque() == currTail) {
|
|
+ this.setTailOpaque(tail);
|
|
+ }
|
|
+ return next != curr;
|
|
+ }
|
|
+
|
|
+ ++failures;
|
|
+ curr = compared;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (curr == currTail) {
|
|
+ /* Tail is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to tail */
|
|
+ if (currTail == (currTail = this.getTailOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = currTail;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // return true if successful, false otherwise
|
|
+ protected final boolean appendList(final LinkedNode<E> head, final LinkedNode<E> tail) {
|
|
+ int failures = 0;
|
|
+
|
|
+ for (LinkedNode<E> currTail = this.getTailOpaque(), curr = currTail;;) {
|
|
+ /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
|
|
+ /* It is likely due to a cache miss caused by another write to the next field */
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ if (next == curr) {
|
|
+ /* Additions are stopped */
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (next == null) {
|
|
+ final LinkedNode<E> compared = curr.compareExchangeNextVolatile(null, head);
|
|
+
|
|
+ if (compared == null) {
|
|
+ /* Added */
|
|
+ /* Avoid CASing on tail more than we need to */
|
|
+ /* CAS to avoid setting an out-of-date tail */
|
|
+ if (this.getTailOpaque() == currTail) {
|
|
+ this.setTailOpaque(tail);
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ ++failures;
|
|
+ curr = compared;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (curr == currTail) {
|
|
+ /* Tail is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to tail */
|
|
+ if (currTail == (currTail = this.getTailOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = currTail;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected final E removeHead(final Predicate<E> predicate) {
|
|
+ int failures = 0;
|
|
+ for (LinkedNode<E> head = this.getHeadOpaque(), curr = head;;) {
|
|
+ // volatile here synchronizes-with writes to element
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E currentVal = curr.getElementPlain();
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (currentVal != null) {
|
|
+ if (!predicate.test(currentVal)) {
|
|
+ /* Try to update stale head */
|
|
+ if (curr != head && this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(curr);
|
|
+ }
|
|
+ return null;
|
|
+ }
|
|
+ if (curr.getAndSetElementVolatile(null) == null) {
|
|
+ /* Failed to get head */
|
|
+ if (curr == (curr = next) || next == null) {
|
|
+ return null;
|
|
+ }
|
|
+ ++failures;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* "CAS" to avoid setting an out-of-date head */
|
|
+ if (this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(next != null ? next : curr);
|
|
+ }
|
|
+
|
|
+ return currentVal;
|
|
+ }
|
|
+
|
|
+ if (curr == next || next == null) {
|
|
+ /* Try to update stale head */
|
|
+ if (curr != head && this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(curr);
|
|
+ }
|
|
+ return null; /* End of queue */
|
|
+ }
|
|
+
|
|
+ if (head == curr) {
|
|
+ /* head is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to head */
|
|
+ if (head == (head = this.getHeadOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = head;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected final E removeHead() {
|
|
+ int failures = 0;
|
|
+ for (LinkedNode<E> head = this.getHeadOpaque(), curr = head;;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+ final E currentVal = curr.getElementPlain();
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (currentVal != null) {
|
|
+ if (curr.getAndSetElementVolatile(null) == null) {
|
|
+ /* Failed to get head */
|
|
+ if (curr == (curr = next) || next == null) {
|
|
+ return null;
|
|
+ }
|
|
+ ++failures;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* "CAS" to avoid setting an out-of-date head */
|
|
+ if (this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(next != null ? next : curr);
|
|
+ }
|
|
+
|
|
+ return currentVal;
|
|
+ }
|
|
+
|
|
+ if (curr == next || next == null) {
|
|
+ /* Try to update stale head */
|
|
+ if (curr != head && this.getHeadOpaque() == head) {
|
|
+ this.setHeadOpaque(curr);
|
|
+ }
|
|
+ return null; /* End of queue */
|
|
+ }
|
|
+
|
|
+ if (head == curr) {
|
|
+ /* head is likely not up-to-date */
|
|
+ curr = next;
|
|
+ } else {
|
|
+ /* Try to update to head */
|
|
+ if (head == (head = this.getHeadOpaque())) {
|
|
+ curr = next;
|
|
+ } else {
|
|
+ curr = head;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Empties the queue into the specified consumer. This function is optimized for single-threaded reads, and should
|
|
+ * be faster than a loop on {@link #poll()}.
|
|
+ * <p>
|
|
+     * This function is not MT-Safe. This function cannot be called concurrently with other read operations ({@link #peek()}, {@link #poll()},
|
|
+ * {@link #clear()}, etc).
|
|
+ * Write operations are safe to be called concurrently.
|
|
+ * </p>
|
|
+ * @param consumer The consumer to accept the elements.
|
|
+ * @return The total number of elements drained.
|
|
+ */
|
|
+ public int drain(final Consumer<E> consumer) {
|
|
+ return this.drain(consumer, false, ConcurrentUtil::rethrow);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Empties the queue into the specified consumer. This function is optimized for single-threaded reads, and should
|
|
+ * be faster than a loop on {@link #poll()}.
|
|
+ * <p>
|
|
+ * If {@code preventAdds} is {@code true}, then after this function returns the queue is guaranteed to be empty and
|
|
+ * additions to the queue will fail.
|
|
+ * </p>
|
|
+ * <p>
|
|
+     * This function is not MT-Safe. This function cannot be called concurrently with other read operations ({@link #peek()}, {@link #poll()},
|
|
+ * {@link #clear()}, etc).
|
|
+ * Write operations are safe to be called concurrently.
|
|
+ * </p>
|
|
+ * @param consumer The consumer to accept the elements.
|
|
+ * @param preventAdds Whether to prevent additions to this queue after draining.
|
|
+ * @return The total number of elements drained.
|
|
+ */
|
|
+ public int drain(final Consumer<E> consumer, final boolean preventAdds) {
|
|
+ return this.drain(consumer, preventAdds, ConcurrentUtil::rethrow);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Empties the queue into the specified consumer. This function is optimized for single-threaded reads, and should
|
|
+ * be faster than a loop on {@link #poll()}.
|
|
+ * <p>
|
|
+ * If {@code preventAdds} is {@code true}, then after this function returns the queue is guaranteed to be empty and
|
|
+ * additions to the queue will fail.
|
|
+ * </p>
|
|
+ * <p>
|
|
+     * This function is not MT-Safe. This function cannot be called concurrently with other read operations ({@link #peek()}, {@link #poll()},
|
|
+ * {@link #clear()}, {@link #remove(Object)} etc).
|
|
+ * Only write operations are safe to be called concurrently.
|
|
+ * </p>
|
|
+ * @param consumer The consumer to accept the elements.
|
|
+ * @param preventAdds Whether to prevent additions to this queue after draining.
|
|
+ * @param exceptionHandler Invoked when the consumer raises an exception.
|
|
+ * @return The total number of elements drained.
|
|
+ */
|
|
+ public int drain(final Consumer<E> consumer, final boolean preventAdds, final Consumer<Throwable> exceptionHandler) {
|
|
+ Validate.notNull(consumer, "Null consumer");
|
|
+ Validate.notNull(exceptionHandler, "Null exception handler");
|
|
+
|
|
+ /* This function assumes proper synchronization is made to ensure drain and no other read function are called concurrently */
|
|
+ /* This allows plain write usages instead of opaque or higher */
|
|
+ int total = 0;
|
|
+
|
|
+ final LinkedNode<E> head = this.getHeadAcquire(); /* Required to synchronize with the write to the first element field */
|
|
+ LinkedNode<E> curr = head;
|
|
+
|
|
+ for (;;) {
|
|
+ /* Volatile acquires with the write to the element field */
|
|
+ final E currentVal = curr.getElementPlain();
|
|
+ LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ if (next == curr) {
|
|
+ /* Add-locked nodes always have a null value */
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (currentVal == null) {
|
|
+ if (next == null) {
|
|
+ if (preventAdds && (next = curr.compareExchangeNextVolatile(null, curr)) != null) {
|
|
+ // failed to prevent adds, continue
|
|
+ curr = next;
|
|
+ continue;
|
|
+ } else {
|
|
+ // we're done here
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ curr = next;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ consumer.accept(currentVal);
|
|
+ } catch (final Exception ex) {
|
|
+ this.setHeadOpaque(next != null ? next : curr); /* Avoid perf penalty (of reiterating) if the exception handler decides to re-throw */
|
|
+ curr.setElementOpaque(null); /* set here, we might re-throw */
|
|
+
|
|
+ exceptionHandler.accept(ex);
|
|
+ }
|
|
+
|
|
+ curr.setElementOpaque(null);
|
|
+
|
|
+ ++total;
|
|
+
|
|
+ if (next == null) {
|
|
+ if (preventAdds && (next = curr.compareExchangeNextVolatile(null, curr)) != null) {
|
|
+ /* Retry with next value */
|
|
+ curr = next;
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ curr = next;
|
|
+ }
|
|
+ if (curr != head) {
|
|
+ this.setHeadOpaque(curr); /* While this may be a plain write, eventually publish it for methods such as find. */
|
|
+ }
|
|
+ return total;
|
|
+ }
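+
+    /*
+     * Illustrative drain-on-shutdown sketch (assumes exactly one thread ever reads this queue):
+     *
+     *   // racing producers past this point observe add()/offer() returning false
+     *   final int flushed = queue.drain((final Runnable task) -> task.run(), true);
+     *   // the queue is now empty and permanently add-blocked
+     */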
|
|
+
|
|
+ @Override
|
|
+ public Spliterator<E> spliterator() { // TODO implement
|
|
+ return Spliterators.spliterator(this, Spliterator.CONCURRENT |
|
|
+ Spliterator.NONNULL | Spliterator.ORDERED);
|
|
+ }
|
|
+
|
|
+ protected static final class LinkedNode<E> {
|
|
+
|
|
+ protected volatile Object element;
|
|
+ protected volatile LinkedNode<E> next;
|
|
+
|
|
+ protected static final VarHandle ELEMENT_HANDLE = ConcurrentUtil.getVarHandle(LinkedNode.class, "element", Object.class);
|
|
+ protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(LinkedNode.class, "next", LinkedNode.class);
|
|
+
|
|
+ protected LinkedNode(final Object element, final LinkedNode<E> next) {
|
|
+ ELEMENT_HANDLE.set(this, element);
|
|
+ NEXT_HANDLE.set(this, next);
|
|
+ }
|
|
+
|
|
+ /* element */
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final E getElementPlain() {
|
|
+ return (E)ELEMENT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final E getElementVolatile() {
|
|
+ return (E)ELEMENT_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ protected final void setElementPlain(final E update) {
|
|
+ ELEMENT_HANDLE.set(this, (Object)update);
|
|
+ }
|
|
+
|
|
+ protected final void setElementOpaque(final E update) {
|
|
+ ELEMENT_HANDLE.setOpaque(this, (Object)update);
|
|
+ }
|
|
+
|
|
+ protected final void setElementVolatile(final E update) {
|
|
+ ELEMENT_HANDLE.setVolatile(this, (Object)update);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final E getAndSetElementVolatile(final E update) {
|
|
+ return (E)ELEMENT_HANDLE.getAndSet(this, update);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final E compareExchangeElementVolatile(final E expect, final E update) {
|
|
+ return (E)ELEMENT_HANDLE.compareAndExchange(this, expect, update);
|
|
+ }
|
|
+
|
|
+ /* next */
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getNextPlain() {
|
|
+ return (LinkedNode<E>)NEXT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getNextOpaque() {
|
|
+ return (LinkedNode<E>)NEXT_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getNextAcquire() {
|
|
+ return (LinkedNode<E>)NEXT_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> getNextVolatile() {
|
|
+ return (LinkedNode<E>)NEXT_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ protected final void setNextPlain(final LinkedNode<E> next) {
|
|
+ NEXT_HANDLE.set(this, next);
|
|
+ }
|
|
+
|
|
+ protected final void setNextVolatile(final LinkedNode<E> next) {
|
|
+ NEXT_HANDLE.setVolatile(this, next);
|
|
+ }
|
|
+
|
|
+ @SuppressWarnings("unchecked")
|
|
+ protected final LinkedNode<E> compareExchangeNextVolatile(final LinkedNode<E> expect, final LinkedNode<E> set) {
|
|
+ return (LinkedNode<E>)NEXT_HANDLE.compareAndExchange(this, expect, set);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class LinkedIterator<E> implements Iterator<E> {
|
|
+
|
|
+ protected LinkedNode<E> curr; /* last returned by next() */
|
|
+ protected LinkedNode<E> next; /* next to return from next() */
|
|
+ protected E nextElement; /* cached to avoid a race condition with removing or polling */
|
|
+
|
|
+ protected LinkedIterator(final LinkedNode<E> start) {
|
|
+ /* setup nextElement and next */
|
|
+ for (LinkedNode<E> curr = start;;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ final E element = curr.getElementPlain();
|
|
+
|
|
+ if (element != null) {
|
|
+ this.nextElement = element;
|
|
+ this.next = curr;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected final void findNext() {
|
|
+ /* only called if this.nextElement != null, which means this.next != null */
|
|
+ for (LinkedNode<E> curr = this.next;;) {
|
|
+ final LinkedNode<E> next = curr.getNextVolatile();
|
|
+
|
|
+ if (next == null || next == curr) {
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ final E element = next.getElementPlain();
|
|
+
|
|
+ if (element != null) {
|
|
+ this.nextElement = element;
|
|
+ this.curr = this.next; /* this.next will be the value returned from next(), set this.curr for remove() */
|
|
+ this.next = next;
|
|
+ return;
|
|
+ }
|
|
+ curr = next;
|
|
+ }
|
|
+
|
|
+ /* out of nodes to iterate */
|
|
+ /* keep curr for remove() calls */
|
|
+ this.next = null;
|
|
+ this.nextElement = null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean hasNext() {
|
|
+ return this.nextElement != null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public E next() {
|
|
+ final E element = this.nextElement;
|
|
+
|
|
+ if (element == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ this.findNext();
|
|
+
|
|
+ return element;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void remove() {
|
|
+ if (this.curr == null) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+
|
|
+ this.curr.setElementVolatile(null);
|
|
+ this.curr = null;
|
|
+ }
|
|
+ }
|
|
+}
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/completable/CallbackCompletable.java b/src/main/java/ca/spottedleaf/concurrentutil/completable/CallbackCompletable.java
new file mode 100644
index 0000000000000000000000000000000000000000..6bad6f8ecc0944d2f406924c7de7e227ff1e70fa
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/completable/CallbackCompletable.java
@@ -0,0 +1,110 @@
+package ca.spottedleaf.concurrentutil.completable;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
|
|
+import ca.spottedleaf.concurrentutil.executor.Cancellable;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import org.slf4j.Logger;
|
|
+import org.slf4j.LoggerFactory;
|
|
+import java.util.function.BiConsumer;
|
|
+
|
|
+public final class CallbackCompletable<T> {
|
|
+
|
|
+ private static final Logger LOGGER = LoggerFactory.getLogger(CallbackCompletable.class);
|
|
+
|
|
+ private final MultiThreadedQueue<BiConsumer<T, Throwable>> waiters = new MultiThreadedQueue<>();
|
|
+ private T result;
|
|
+ private Throwable throwable;
|
|
+ private volatile boolean completed;
|
|
+
|
|
+ public boolean isCompleted() {
|
|
+ return this.completed;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+     * Note: only safe to use from inside a waiter callback (see {@link #addAsynchronousWaiter(BiConsumer)}), as this
+     * function performs zero synchronisation of its own
|
|
+ */
|
|
+ public T getResult() {
|
|
+ return this.result;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+     * Note: only safe to use from inside a waiter callback (see {@link #addAsynchronousWaiter(BiConsumer)}), as this
+     * function performs zero synchronisation of its own
|
|
+ */
|
|
+ public Throwable getThrowable() {
|
|
+ return this.throwable;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds a waiter that should only be completed asynchronously by the complete() calls. If complete()
|
|
+ * has already been called, returns {@code null} and does not invoke the specified consumer.
|
|
+ * @param consumer Consumer to be executed on completion
|
|
+ * @throws NullPointerException If consumer is null
|
|
+ * @return A cancellable which will control the execution of the specified consumer
|
|
+ */
|
|
+ public Cancellable addAsynchronousWaiter(final BiConsumer<T, Throwable> consumer) {
|
|
+ if (this.waiters.add(consumer)) {
|
|
+ return new CancellableImpl(consumer);
|
|
+ }
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ private void completeAllWaiters(final T result, final Throwable throwable) {
|
|
+ this.completed = true;
|
|
+ BiConsumer<T, Throwable> waiter;
|
|
+ while ((waiter = this.waiters.pollOrBlockAdds()) != null) {
|
|
+ this.completeWaiter(waiter, result, throwable);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void completeWaiter(final BiConsumer<T, Throwable> consumer, final T result, final Throwable throwable) {
|
|
+ try {
|
|
+ consumer.accept(result, throwable);
|
|
+ } catch (final Throwable throwable2) {
|
|
+ LOGGER.error("Failed to complete callback " + ConcurrentUtil.genericToString(consumer), throwable2);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds a waiter that will be completed asynchronously by the complete() calls. If complete()
|
|
+ * has already been called, then invokes the consumer synchronously with the completed result.
|
|
+ * @param consumer Consumer to be executed on completion
|
|
+ * @throws NullPointerException If consumer is null
|
|
+ * @return A cancellable which will control the execution of the specified consumer
|
|
+ */
|
|
+ public Cancellable addWaiter(final BiConsumer<T, Throwable> consumer) {
|
|
+ if (this.waiters.add(consumer)) {
|
|
+ return new CancellableImpl(consumer);
|
|
+ }
|
|
+ this.completeWaiter(consumer, this.result, this.throwable);
|
|
+ return new CancellableImpl(consumer);
|
|
+ }
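+
+    /*
+     * Illustrative usage sketch (handle() is an assumed application callback):
+     *
+     *   final CallbackCompletable<String> pending = new CallbackCompletable<>();
+     *
+     *   // before completion: the callback is queued and later run by the completing thread
+     *   pending.addWaiter((final String result, final Throwable thr) -> handle(result, thr));
+     *
+     *   // completing thread
+     *   pending.complete("value");
+     *
+     *   // after completion: addWaiter() invokes the callback synchronously on the calling thread
+     *   pending.addWaiter((final String result, final Throwable thr) -> handle(result, thr));
+     */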
|
|
+
|
|
+ public void complete(final T result) {
|
|
+ this.result = result;
|
|
+ this.completeAllWaiters(result, null);
|
|
+ }
|
|
+
|
|
+ public void completeWithThrowable(final Throwable throwable) {
|
|
+ if (throwable == null) {
|
|
+ throw new NullPointerException("Throwable cannot be null");
|
|
+ }
|
|
+ this.throwable = throwable;
|
|
+ this.completeAllWaiters(null, throwable);
|
|
+ }
|
|
+
|
|
+ private final class CancellableImpl implements Cancellable {
|
|
+
|
|
+ private final BiConsumer<T, Throwable> waiter;
|
|
+
|
|
+ private CancellableImpl(final BiConsumer<T, Throwable> waiter) {
|
|
+ this.waiter = waiter;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean cancel() {
|
|
+ return CallbackCompletable.this.waiters.remove(this.waiter);
|
|
+ }
|
|
+ }
|
|
+}
|
|
\ No newline at end of file
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java b/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java
new file mode 100644
index 0000000000000000000000000000000000000000..365616439fa079017d648ed7f6ddf6950a691adf
--- /dev/null
+++ b/src/main/java/ca/spottedleaf/concurrentutil/completable/Completable.java
@@ -0,0 +1,737 @@
+package ca.spottedleaf.concurrentutil.completable;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Validate;
|
|
+import org.slf4j.Logger;
|
|
+import org.slf4j.LoggerFactory;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.concurrent.CompletableFuture;
|
|
+import java.util.concurrent.CompletionException;
|
|
+import java.util.concurrent.CompletionStage;
|
|
+import java.util.concurrent.Executor;
|
|
+import java.util.concurrent.ForkJoinPool;
|
|
+import java.util.concurrent.locks.LockSupport;
|
|
+import java.util.function.BiConsumer;
|
|
+import java.util.function.BiFunction;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.Function;
|
|
+import java.util.function.Supplier;
|
|
+
|
|
+public final class Completable<T> {
|
|
+
|
|
+ private static final Logger LOGGER = LoggerFactory.getLogger(Completable.class);
|
|
+ private static final Function<? super Throwable, ? extends Throwable> DEFAULT_EXCEPTION_HANDLER = (final Throwable thr) -> {
|
|
+ LOGGER.error("Unhandled exception during Completable operation", thr);
|
|
+ return thr;
|
|
+ };
|
|
+
|
|
+ public static Executor getDefaultExecutor() {
|
|
+ return ForkJoinPool.commonPool();
|
|
+ }
|
|
+
|
|
+ private static final Transform<?, ?> COMPLETED_STACK = new Transform<>(null, null, null, null) {
|
|
+ @Override
|
|
+ public void run() {}
|
|
+ };
|
|
+ private volatile Transform<?, T> completeStack;
|
|
+ private static final VarHandle COMPLETE_STACK_HANDLE = ConcurrentUtil.getVarHandle(Completable.class, "completeStack", Transform.class);
|
|
+
|
|
+ private static final Object NULL_MASK = new Object();
|
|
+ private volatile Object result;
|
|
+ private static final VarHandle RESULT_HANDLE = ConcurrentUtil.getVarHandle(Completable.class, "result", Object.class);
|
|
+
|
|
+ private Object getResultPlain() {
|
|
+ return (Object)RESULT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ private Object getResultVolatile() {
|
|
+ return (Object)RESULT_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ private void pushStackOrRun(final Transform<?, T> push) {
|
|
+ int failures = 0;
|
|
+ for (Transform<?, T> curr = (Transform<?, T>)COMPLETE_STACK_HANDLE.getVolatile(this);;) {
|
|
+ if (curr == COMPLETED_STACK) {
|
|
+ push.execute();
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ push.next = curr;
|
|
+
|
|
+ for (int i = 0; i < failures; ++i) {
|
|
+ ConcurrentUtil.backoff();
|
|
+ }
|
|
+
|
|
+ if (curr == (curr = (Transform<?, T>)COMPLETE_STACK_HANDLE.compareAndExchange(this, curr, push))) {
|
|
+ return;
|
|
+ }
|
|
+ push.next = null;
|
|
+ ++failures;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void propagateStack() {
|
|
+ Transform<?, T> topStack = (Transform<?, T>)COMPLETE_STACK_HANDLE.getAndSet(this, COMPLETED_STACK);
|
|
+ while (topStack != null) {
|
|
+ topStack.execute();
|
|
+ topStack = topStack.next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static Object maskNull(final Object res) {
|
|
+ return res == null ? NULL_MASK : res;
|
|
+ }
|
|
+
|
|
+ private static Object unmaskNull(final Object res) {
|
|
+ return res == NULL_MASK ? null : res;
|
|
+ }
|
|
+
|
|
+ private static Executor checkExecutor(final Executor executor) {
|
|
+ return Validate.notNull(executor, "Executor may not be null");
|
|
+ }
|
|
+
|
|
+ public Completable() {}
|
|
+
|
|
+ private Completable(final Object complete) {
|
|
+ COMPLETE_STACK_HANDLE.set(this, COMPLETED_STACK);
|
|
+ RESULT_HANDLE.setRelease(this, complete);
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> completed(final T value) {
|
|
+ return new Completable<>(maskNull(value));
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> failed(final Throwable ex) {
|
|
+ Validate.notNull(ex, "Exception may not be null");
|
|
+
|
|
+ return new Completable<>(new ExceptionResult(ex));
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> supplied(final Supplier<T> supplier) {
|
|
+ return supplied(supplier, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> supplied(final Supplier<T> supplier, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ try {
|
|
+ return completed(supplier.get());
|
|
+ } catch (final Throwable throwable) {
|
|
+ Throwable complete;
|
|
+ try {
|
|
+ complete = exceptionHandler.apply(throwable);
|
|
+ } catch (final Throwable thr2) {
|
|
+ throwable.addSuppressed(thr2);
|
|
+ complete = throwable;
|
|
+ }
|
|
+ return failed(complete);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> suppliedAsync(final Supplier<T> supplier, final Executor executor) {
|
|
+ return suppliedAsync(supplier, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> suppliedAsync(final Supplier<T> supplier, final Executor executor, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+
|
|
+ class AsyncSuppliedCompletable implements Runnable, CompletableFuture.AsynchronousCompletionTask {
|
|
+ @Override
|
|
+ public void run() {
|
|
+ try {
|
|
+ ret.complete(supplier.get());
|
|
+ } catch (final Throwable throwable) {
|
|
+ Throwable complete;
|
|
+ try {
|
|
+ complete = exceptionHandler.apply(throwable);
|
|
+ } catch (final Throwable thr2) {
|
|
+ throwable.addSuppressed(thr2);
|
|
+ complete = throwable;
|
|
+ }
|
|
+ ret.completeExceptionally(complete);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ executor.execute(new AsyncSuppliedCompletable());
|
|
+ } catch (final Throwable throwable) {
|
|
+ Throwable complete;
|
|
+ try {
|
|
+ complete = exceptionHandler.apply(throwable);
|
|
+ } catch (final Throwable thr2) {
|
|
+ throwable.addSuppressed(thr2);
|
|
+ complete = throwable;
|
|
+ }
|
|
+ ret.completeExceptionally(complete);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ private boolean completeRaw(final Object value) {
|
|
+ if ((Object)RESULT_HANDLE.getVolatile(this) != null || !(boolean)RESULT_HANDLE.compareAndSet(this, (Object)null, value)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.propagateStack();
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ public boolean complete(final T result) {
|
|
+ return this.completeRaw(maskNull(result));
|
|
+ }
|
|
+
|
|
+ public boolean completeExceptionally(final Throwable exception) {
|
|
+ Validate.notNull(exception, "Exception may not be null");
|
|
+
|
|
+ return this.completeRaw(new ExceptionResult(exception));
|
|
+ }
|
|
+
|
|
+ public boolean isDone() {
|
|
+ return this.getResultVolatile() != null;
|
|
+ }
|
|
+
|
|
+ public boolean isNormallyComplete() {
|
|
+ return this.getResultVolatile() != null && !(this.getResultVolatile() instanceof ExceptionResult);
|
|
+ }
|
|
+
|
|
+ public boolean isExceptionallyComplete() {
|
|
+ return this.getResultVolatile() instanceof ExceptionResult;
|
|
+ }
|
|
+
|
|
+ public Throwable getException() {
|
|
+ final Object res = this.getResultVolatile();
|
|
+ if (res == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (!(res instanceof ExceptionResult exRes)) {
|
|
+ throw new IllegalStateException("Not completed exceptionally");
|
|
+ }
|
|
+
|
|
+ return exRes.ex;
|
|
+ }
|
|
+
|
|
+ public T getNow(final T dfl) throws CompletionException {
|
|
+ final Object res = this.getResultVolatile();
|
|
+ if (res == null) {
|
|
+ return dfl;
|
|
+ }
|
|
+
|
|
+ if (res instanceof ExceptionResult exRes) {
|
|
+ throw new CompletionException(exRes.ex);
|
|
+ }
|
|
+
|
|
+ return (T)unmaskNull(res);
|
|
+ }
|
|
+
|
|
+ public T join() throws CompletionException {
|
|
+ if (this.isDone()) {
|
|
+ return this.getNow(null);
|
|
+ }
|
|
+
|
|
+ final UnparkTransform<T> unparkTransform = new UnparkTransform<>(this, Thread.currentThread());
|
|
+
|
|
+ this.pushStackOrRun(unparkTransform);
|
|
+
|
|
+ boolean interrupted = false;
|
|
+ while (!unparkTransform.isReleasable()) {
|
|
+ try {
|
|
+ ForkJoinPool.managedBlock(unparkTransform);
|
|
+ } catch (final InterruptedException ex) {
|
|
+ interrupted = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (interrupted) {
|
|
+ Thread.currentThread().interrupt();
|
|
+ }
|
|
+
|
|
+ return this.getNow(null);
|
|
+ }
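Note that join() blocks through ForkJoinPool.managedBlock, so a caller running on a ForkJoinPool worker lets the pool compensate with an extra thread instead of starving it. A small sketch, with the import path assumed as above:

import ca.spottedleaf.concurrentutil.completable.Completable;

import java.util.concurrent.ForkJoinPool;

public final class JoinExample {
    public static void main(final String[] args) {
        final Completable<Integer> completable = new Completable<>();

        // Complete from another thread.
        new Thread(() -> completable.complete(42)).start();

        // join() registers an UnparkTransform (a ManagedBlocker) and blocks via
        // ForkJoinPool.managedBlock, so a common-pool worker waiting here can be
        // compensated for by the pool.
        final Integer result = ForkJoinPool.commonPool().submit(completable::join).join();
        System.out.println(result);
    }
}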
|
|
+
|
|
+ public CompletableFuture<T> toFuture() {
|
|
+ final Object rawResult = this.getResultVolatile();
|
|
+ if (rawResult != null) {
|
|
+ if (rawResult instanceof ExceptionResult exRes) {
|
|
+ return CompletableFuture.failedFuture(exRes.ex);
|
|
+ } else {
|
|
+ return CompletableFuture.completedFuture((T)unmaskNull(rawResult));
|
|
+ }
|
|
+ }
|
|
+
|
|
+ final CompletableFuture<T> ret = new CompletableFuture<>();
|
|
+
|
|
+ class ToFuture implements BiConsumer<T, Throwable> {
|
|
+
|
|
+ @Override
|
|
+ public void accept(final T res, final Throwable ex) {
|
|
+ if (ex != null) {
|
|
+ ret.completeExceptionally(ex);
|
|
+ } else {
|
|
+ ret.complete(res);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ this.whenComplete(new ToFuture());
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public static <T> Completable<T> fromFuture(final CompletionStage<T> stage) {
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+
|
|
+ class FromFuture implements BiConsumer<T, Throwable> {
|
|
+ @Override
|
|
+ public void accept(final T res, final Throwable ex) {
|
|
+ if (ex != null) {
|
|
+ ret.completeExceptionally(ex);
|
|
+ } else {
|
|
+ ret.complete(res);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ stage.whenComplete(new FromFuture());
|
|
+
|
|
+ return ret;
|
|
+ }
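toFuture() and fromFuture() bridge to the JDK CompletionStage API in both directions; a round-trip sketch, with the import path again assumed:

import ca.spottedleaf.concurrentutil.completable.Completable;

import java.util.concurrent.CompletableFuture;

public final class BridgeExample {
    public static void main(final String[] args) {
        // JDK future -> Completable
        final CompletableFuture<String> jdk = CompletableFuture.supplyAsync(() -> "data");
        final Completable<String> wrapped = Completable.fromFuture(jdk);

        // Completable -> JDK future (already-completed results short-circuit to
        // completedFuture/failedFuture without registering a callback)
        final CompletableFuture<String> back = wrapped.toFuture();
        System.out.println(back.join());
    }
}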
|
|
+
|
|
+
|
|
+ public <U> Completable<U> thenApply(final Function<? super T, ? extends U> function) {
|
|
+ return this.thenApply(function, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> thenApply(final Function<? super T, ? extends U> function, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<U> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new ApplyTransform<>(null, this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> thenApplyAsync(final Function<? super T, ? extends U> function) {
|
|
+ return this.thenApplyAsync(function, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> thenApplyAsync(final Function<? super T, ? extends U> function, final Executor executor) {
|
|
+ return this.thenApplyAsync(function, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> thenApplyAsync(final Function<? super T, ? extends U> function, final Executor executor, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<U> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new ApplyTransform<>(checkExecutor(executor), this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ public Completable<Void> thenAccept(final Consumer<? super T> consumer) {
|
|
+ return this.thenAccept(consumer, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenAccept(final Consumer<? super T> consumer, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(consumer, "Consumer may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<Void> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new AcceptTransform<>(null, this, ret, exceptionHandler, consumer));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenAcceptAsync(final Consumer<? super T> consumer) {
|
|
+ return this.thenAcceptAsync(consumer, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenAcceptAsync(final Consumer<? super T> consumer, final Executor executor) {
|
|
+ return this.thenAcceptAsync(consumer, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenAcceptAsync(final Consumer<? super T> consumer, final Executor executor, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(consumer, "Consumer may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<Void> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new AcceptTransform<>(checkExecutor(executor), this, ret, exceptionHandler, consumer));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ public Completable<Void> thenRun(final Runnable run) {
|
|
+ return this.thenRun(run, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenRun(final Runnable run, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(run, "Run may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<Void> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new RunTransform<>(null, this, ret, exceptionHandler, run));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenRunAsync(final Runnable run) {
|
|
+ return this.thenRunAsync(run, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenRunAsync(final Runnable run, final Executor executor) {
|
|
+ return this.thenRunAsync(run, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<Void> thenRunAsync(final Runnable run, final Executor executor, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(run, "Run may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<Void> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new RunTransform<>(checkExecutor(executor), this, ret, exceptionHandler, run));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ public <U> Completable<U> handle(final BiFunction<? super T, ? super Throwable, ? extends U> function) {
|
|
+ return this.handle(function, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> handle(final BiFunction<? super T, ? super Throwable, ? extends U> function,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<U> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new HandleTransform<>(null, this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> handleAsync(final BiFunction<? super T, ? super Throwable, ? extends U> function) {
|
|
+ return this.handleAsync(function, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> handleAsync(final BiFunction<? super T, ? super Throwable, ? extends U> function,
|
|
+ final Executor executor) {
|
|
+ return this.handleAsync(function, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public <U> Completable<U> handleAsync(final BiFunction<? super T, ? super Throwable, ? extends U> function,
|
|
+ final Executor executor,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<U> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new HandleTransform<>(checkExecutor(executor), this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ public Completable<T> whenComplete(final BiConsumer<? super T, ? super Throwable> consumer) {
|
|
+ return this.whenComplete(consumer, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> whenComplete(final BiConsumer<? super T, ? super Throwable> consumer, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(consumer, "Consumer may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new WhenTransform<>(null, this, ret, exceptionHandler, consumer));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Completable<T> whenCompleteAsync(final BiConsumer<? super T, ? super Throwable> consumer) {
|
|
+ return this.whenCompleteAsync(consumer, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> whenCompleteAsync(final BiConsumer<? super T, ? super Throwable> consumer, final Executor executor) {
|
|
+ return this.whenCompleteAsync(consumer, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> whenCompleteAsync(final BiConsumer<? super T, ? super Throwable> consumer, final Executor executor,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(consumer, "Consumer may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new WhenTransform<>(checkExecutor(executor), this, ret, exceptionHandler, consumer));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ public Completable<T> exceptionally(final Function<Throwable, ? extends T> function) {
|
|
+ return this.exceptionally(function, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> exceptionally(final Function<Throwable, ? extends T> function, final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new ExceptionallyTransform<>(null, this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Completable<T> exceptionallyAsync(final Function<Throwable, ? extends T> function) {
|
|
+ return this.exceptionallyAsync(function, getDefaultExecutor(), DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> exceptionallyAsync(final Function<Throwable, ? extends T> function, final Executor executor) {
|
|
+ return this.exceptionallyAsync(function, executor, DEFAULT_EXCEPTION_HANDLER);
|
|
+ }
|
|
+
|
|
+ public Completable<T> exceptionallyAsync(final Function<Throwable, ? extends T> function, final Executor executor,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ Validate.notNull(function, "Function may not be null");
|
|
+ Validate.notNull(exceptionHandler, "Exception handler may not be null");
|
|
+
|
|
+ final Completable<T> ret = new Completable<>();
|
|
+ this.pushStackOrRun(new ExceptionallyTransform<>(checkExecutor(executor), this, ret, exceptionHandler, function));
|
|
+ return ret;
|
|
+ }
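The combinators above mirror CompletionStage, with each overload optionally taking a per-stage exception handler. A short synchronous chain for illustration (import path assumed):

import ca.spottedleaf.concurrentutil.completable.Completable;

public final class ChainExample {
    public static void main(final String[] args) {
        final Completable<Integer> source = new Completable<>();

        source.thenApply((final Integer v) -> v * 2)
              .thenAccept((final Integer v) -> System.out.println("doubled: " + v))
              .exceptionally((final Throwable thr) -> {
                  System.err.println("chain failed: " + thr);
                  return null;
              });

        // Since the non-async variants were used, completing the source runs the
        // whole chain on the completing thread.
        source.complete(21);
    }
}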
|
|
+
|
|
+ private static final class ExceptionResult {
|
|
+ public final Throwable ex;
|
|
+
|
|
+ public ExceptionResult(final Throwable ex) {
|
|
+ this.ex = ex;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static abstract class Transform<U, T> implements Runnable, CompletableFuture.AsynchronousCompletionTask {
|
|
+
|
|
+ private Transform<?, T> next;
|
|
+
|
|
+ private final Executor executor;
|
|
+ protected final Completable<T> from;
|
|
+ protected final Completable<U> to;
|
|
+ protected final Function<? super Throwable, ? extends Throwable> exceptionHandler;
|
|
+
|
|
+ protected Transform(final Executor executor, final Completable<T> from, final Completable<U> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler) {
|
|
+ this.executor = executor;
|
|
+ this.from = from;
|
|
+ this.to = to;
|
|
+ this.exceptionHandler = exceptionHandler;
|
|
+ }
|
|
+
|
|
+ // force interface call to become virtual call
|
|
+ @Override
|
|
+ public abstract void run();
|
|
+
|
|
+ protected void failed(final Throwable throwable) {
|
|
+ Throwable complete;
|
|
+ try {
|
|
+ complete = this.exceptionHandler.apply(throwable);
|
|
+ } catch (final Throwable thr2) {
|
|
+ throwable.addSuppressed(thr2);
|
|
+ complete = throwable;
|
|
+ }
|
|
+ this.to.completeExceptionally(complete);
|
|
+ }
|
|
+
|
|
+ public void execute() {
|
|
+ if (this.executor == null) {
|
|
+ this.run();
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ this.executor.execute(this);
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class ApplyTransform<U, T> extends Transform<U, T> {
|
|
+
|
|
+ private final Function<? super T, ? extends U> function;
|
|
+
|
|
+ public ApplyTransform(final Executor executor, final Completable<T> from, final Completable<U> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final Function<? super T, ? extends U> function) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.function = function;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.to.completeExceptionally(exRes.ex);
|
|
+ } else {
|
|
+ this.to.complete(this.function.apply((T)unmaskNull(result)));
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class AcceptTransform<T> extends Transform<Void, T> {
|
|
+ private final Consumer<? super T> consumer;
|
|
+
|
|
+ public AcceptTransform(final Executor executor, final Completable<T> from, final Completable<Void> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final Consumer<? super T> consumer) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.consumer = consumer;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.to.completeExceptionally(exRes.ex);
|
|
+ } else {
|
|
+ this.consumer.accept((T)unmaskNull(result));
|
|
+ this.to.complete(null);
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class RunTransform<T> extends Transform<Void, T> {
|
|
+ private final Runnable run;
|
|
+
|
|
+ public RunTransform(final Executor executor, final Completable<T> from, final Completable<Void> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final Runnable run) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.run = run;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.to.completeExceptionally(exRes.ex);
|
|
+ } else {
|
|
+ this.run.run();
|
|
+ this.to.complete(null);
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class HandleTransform<U, T> extends Transform<U, T> {
|
|
+
|
|
+ private final BiFunction<? super T, ? super Throwable, ? extends U> function;
|
|
+
|
|
+ public HandleTransform(final Executor executor, final Completable<T> from, final Completable<U> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final BiFunction<? super T, ? super Throwable, ? extends U> function) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.function = function;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.to.complete(this.function.apply(null, exRes.ex));
|
|
+ } else {
|
|
+ this.to.complete(this.function.apply((T)unmaskNull(result), null));
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class WhenTransform<T> extends Transform<T, T> {
|
|
+
|
|
+ private final BiConsumer<? super T, ? super Throwable> consumer;
|
|
+
|
|
+ public WhenTransform(final Executor executor, final Completable<T> from, final Completable<T> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final BiConsumer<? super T, ? super Throwable> consumer) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.consumer = consumer;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.consumer.accept(null, exRes.ex);
|
|
+ this.to.completeExceptionally(exRes.ex);
|
|
+ } else {
|
|
+ final T unmasked = (T)unmaskNull(result);
|
|
+ this.consumer.accept(unmasked, null);
|
|
+ this.to.complete(unmasked);
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class ExceptionallyTransform<T> extends Transform<T, T> {
|
|
+ private final Function<Throwable, ? extends T> function;
|
|
+
|
|
+ public ExceptionallyTransform(final Executor executor, final Completable<T> from, final Completable<T> to,
|
|
+ final Function<? super Throwable, ? extends Throwable> exceptionHandler,
|
|
+ final Function<Throwable, ? extends T> function) {
|
|
+ super(executor, from, to, exceptionHandler);
|
|
+ this.function = function;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Object result = this.from.getResultPlain();
|
|
+ try {
|
|
+ if (result instanceof ExceptionResult exRes) {
|
|
+ this.to.complete(this.function.apply(exRes.ex));
|
|
+ } else {
|
|
+ this.to.complete((T)unmaskNull(result));
|
|
+ }
|
|
+ } catch (final Throwable throwable) {
|
|
+ this.failed(throwable);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class UnparkTransform<T> extends Transform<Void, T> implements ForkJoinPool.ManagedBlocker {
|
|
+
|
|
+ private volatile Thread thread;
|
|
+
|
|
+ public UnparkTransform(final Completable<T> from, final Thread target) {
|
|
+ super(null, from, null, null);
|
|
+ this.thread = target;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ final Thread t = this.thread;
|
|
+ this.thread = null;
|
|
+ LockSupport.unpark(t);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean block() throws InterruptedException {
|
|
+ while (!this.isReleasable()) {
|
|
+ if (Thread.interrupted()) {
|
|
+ throw new InterruptedException();
|
|
+ }
|
|
+ LockSupport.park(this);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isReleasable() {
|
|
+ return this.thread == null;
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/Cancellable.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/Cancellable.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..11449056361bb6c5a055f543cdd135c4113757c6
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/Cancellable.java
|
|
@@ -0,0 +1,14 @@
|
|
+package ca.spottedleaf.concurrentutil.executor;
|
|
+
|
|
+/**
|
|
+ * Interface specifying that something can be cancelled.
|
|
+ */
|
|
+public interface Cancellable {
|
|
+
|
|
+ /**
|
|
+ * Tries to cancel this task. If the task is in a stage that is too late to be cancelled, then this function
|
|
+ * will return {@code false}. If the task is already cancelled, then this function returns {@code false}. Only
|
|
+ * when this function successfully stops this task from being completed will it return {@code true}.
|
|
+ */
|
|
+ public boolean cancel();
|
|
+}
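A minimal sketch of the contract described above, using an atomic state flag so that only the call which actually prevents completion reports success; the class and field names here are illustrative, not part of this patch:

import ca.spottedleaf.concurrentutil.executor.Cancellable;

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: a task that can be cancelled at most once, and only
// before it has started completing.
public final class OneShotTask implements Cancellable {

    private static final int PENDING = 0, CANCELLED = 1, COMPLETED = 2;

    private final AtomicInteger state = new AtomicInteger(PENDING);

    @Override
    public boolean cancel() {
        // true only when this call transitions PENDING -> CANCELLED;
        // false if already cancelled or already (being) completed
        return this.state.compareAndSet(PENDING, CANCELLED);
    }

    public boolean tryComplete() {
        return this.state.compareAndSet(PENDING, COMPLETED);
    }
}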
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/PrioritisedExecutor.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/PrioritisedExecutor.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..17cbaee1e89bd3f6d905e640d20d0119ab0570a0
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/PrioritisedExecutor.java
|
|
@@ -0,0 +1,271 @@
|
|
+package ca.spottedleaf.concurrentutil.executor;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.util.Priority;
|
|
+
|
|
+public interface PrioritisedExecutor {
|
|
+
|
|
+ /**
|
|
+ * Returns the number of tasks that have been scheduled or are pending to be scheduled.
|
|
+ */
|
|
+ public long getTotalTasksScheduled();
|
|
+
|
|
+ /**
|
|
+ * Returns the number of tasks that have been executed.
|
|
+ */
|
|
+ public long getTotalTasksExecuted();
|
|
+
|
|
+ /**
|
|
+ * Generates the next suborder id.
|
|
+ * @return The next suborder id.
|
|
+ */
|
|
+ public long generateNextSubOrder();
|
|
+
|
|
+ /**
|
|
+ * Executes the next available task.
|
|
+ * <p>
|
|
+ * If there is a task with priority {@link Priority#BLOCKING} available, then such a task is executed.
|
|
+ * </p>
|
|
+ * <p>
|
|
+ * If there is a task with priority {@link Priority#IDLE} available then that task is only executed
|
|
+ * when there are no other tasks available with a higher priority.
|
|
+ * </p>
|
|
+ * <p>
|
|
+ * If there are no tasks that have priority {@link Priority#BLOCKING} or {@link Priority#IDLE}, then
|
|
+ * this function will be biased to execute tasks that have higher priorities.
|
|
+ * </p>
|
|
+ *
|
|
+ * @return {@code true} if a task was executed, {@code false} otherwise
|
|
+ * @throws IllegalStateException If the current thread is not allowed to execute a task
|
|
+ */
|
|
+ public boolean executeTask() throws IllegalStateException;
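In practice a worker drains an implementation of this interface by calling executeTask() until it reports that nothing ran; a small sketch:

import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;

public final class DrainExample {
    // Executes queued tasks until none remain; executeTask() is biased towards
    // higher priorities, so BLOCKING work is picked up first and IDLE work last.
    public static void drain(final PrioritisedExecutor executor) {
        while (executor.executeTask()) {
            // loop until executeTask() reports that no task was executed
        }
    }
}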
|
|
+
|
|
+ /**
|
|
+ * Prevent further additions to this executor. Attempts to add after this call has completed (or potentially during it) will
|
|
+ * result in {@link IllegalStateException} being thrown.
|
|
+ * <p>
|
|
+ * This operation is atomic with respect to other shutdown calls
|
|
+ * </p>
|
|
+ * <p>
|
|
+ * After this call has completed, regardless of return value, this executor will be shutdown.
|
|
+ * </p>
|
|
+ *
|
|
+ * @return {@code true} if the executor was shutdown, {@code false} if it has shut down already
|
|
+ * @see #isShutdown()
|
|
+ */
|
|
+ public boolean shutdown();
|
|
+
|
|
+ /**
|
|
+ * Returns whether this executor has shut down. Effectively, returns whether new tasks will be rejected.
|
|
+ * This method does not indicate whether all the tasks scheduled have been executed.
|
|
+ * @return Returns whether this executor has shut down.
|
|
+ */
|
|
+ public boolean isShutdown();
|
|
+
|
|
+ /**
|
|
+ * Queues or executes a task at {@link Priority#NORMAL} priority.
|
|
+ * @param task The task to run.
|
|
+ *
|
|
+ * @throws IllegalStateException If this executor has shutdown.
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @return {@code null} if the current thread immediately executed the task, else returns the prioritised task
|
|
+ * associated with the parameter
|
|
+ */
|
|
+ public PrioritisedTask queueTask(final Runnable task);
|
|
+
|
|
+ /**
|
|
+ * Queues or executes a task.
|
|
+ *
|
|
+ * @param task The task to run.
|
|
+ * @param priority The priority for the task.
|
|
+ *
|
|
+ * @throws IllegalStateException If this executor has shutdown.
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @throws IllegalArgumentException If the priority is invalid.
|
|
+ * @return {@code null} if the current thread immediately executed the task, else returns the prioritised task
|
|
+ * associated with the parameter
|
|
+ */
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority);
|
|
+
|
|
+ /**
|
|
+ * Queues or executes a task.
|
|
+ *
|
|
+ * @param task The task to run.
|
|
+ * @param priority The priority for the task.
|
|
+ * @param subOrder The task's suborder.
|
|
+ *
|
|
+ * @throws IllegalStateException If this executor has shutdown.
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @throws IllegalArgumentException If the priority is invalid.
|
|
+ * @return {@code null} if the current thread immediately executed the task, else returns the prioritised task
|
|
+ * associated with the parameter
|
|
+ */
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder);
|
|
+
|
|
+ /**
|
|
+ * Creates, but does not queue or execute, a task at {@link Priority#NORMAL} priority.
|
|
+ * @param task The task to run.
|
|
+ *
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @return the prioritised task associated with the parameter
|
|
+ */
|
|
+ public PrioritisedTask createTask(final Runnable task);
|
|
+
|
|
+ /**
|
|
+ * Creates, but does not queue or execute, a task at the specified priority.
|
|
+ *
|
|
+ * @param task The task to run.
|
|
+ * @param priority The priority for the task.
|
|
+ *
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @throws IllegalArgumentException If the priority is invalid.
|
|
+ * @return the prioritised task associated with the parameters
|
|
+ */
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority);
|
|
+
|
|
+ /**
|
|
+ * Creates, but does not queue or execute, a task at the specified priority and suborder.
|
|
+ *
|
|
+ * @param task The task to run.
|
|
+ * @param priority The priority for the task.
|
|
+ * @param subOrder The task's suborder.
|
|
+ *
|
|
+ * @throws NullPointerException If the task is null
|
|
+ * @throws IllegalArgumentException If the priority is invalid.
|
|
+ * @return the prioritised task associated with the parameters
|
|
+ */
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder);
|
|
+
|
|
+ public static interface PrioritisedTask extends Cancellable {
|
|
+
|
|
+ /**
|
|
+ * Returns the executor associated with this task.
|
|
+ * @return The executor associated with this task.
|
|
+ */
|
|
+ public PrioritisedExecutor getExecutor();
|
|
+
|
|
+ /**
|
|
+ * Causes a lazily queued task to become queued or executed
|
|
+ *
|
|
+ * @throws IllegalStateException If the backing executor has shutdown
|
|
+ * @return {@code true} If the task was queued, {@code false} if the task was already queued/cancelled/executed
|
|
+ */
|
|
+ public boolean queue();
|
|
+
|
|
+ /**
|
|
+ * Returns whether this task has been queued and is not completing.
|
|
+ * @return {@code true} If the task has been queued, {@code false} if the task has not been queued or is marked
|
|
+ * as completing.
|
|
+ */
|
|
+ public boolean isQueued();
|
|
+
|
|
+ /**
|
|
+ * Forces this task to be marked as completed.
|
|
+ *
|
|
+ * @return {@code true} if the task was cancelled, {@code false} if the task has already completed
|
|
+ * or is being completed.
|
|
+ */
|
|
+ @Override
|
|
+ public boolean cancel();
|
|
+
|
|
+ /**
|
|
+ * Executes this task. This will also mark the task as completing.
|
|
+ * <p>
|
|
+ * Exceptions thrown from the runnable will be rethrown.
|
|
+ * </p>
|
|
+ *
|
|
+ * @return {@code true} if this task was executed, {@code false} if it was already marked as completed.
|
|
+ */
|
|
+ public boolean execute();
|
|
+
|
|
+ /**
|
|
+ * Returns the current priority. Note that {@link Priority#COMPLETING} will be returned
|
|
+ * if this task is completing or has completed.
|
|
+ */
|
|
+ public Priority getPriority();
|
|
+
|
|
+ /**
|
|
+ * Attempts to set this task's priority level to the level specified.
|
|
+ *
|
|
+ * @param priority Specified priority level.
|
|
+ *
|
|
+ * @throws IllegalArgumentException If the priority is invalid
|
|
+ * @return {@code true} if successful, {@code false} if this task is completing or has completed or the queue
|
|
+ * this task was scheduled on was shutdown, or if the priority was already at the specified level.
|
|
+ */
|
|
+ public boolean setPriority(final Priority priority);
|
|
+
|
|
+ /**
|
|
+ * Attempts to raise the priority to the priority level specified.
|
|
+ *
|
|
+ * @param priority Priority specified
|
|
+ *
|
|
+ * @throws IllegalArgumentException If the priority is invalid
|
|
+ * @return {@code false} if the current task is completing, {@code true} if the priority was raised to the
|
|
+ * specified level or was already at the specified level or higher.
|
|
+ */
|
|
+ public boolean raisePriority(final Priority priority);
|
|
+
|
|
+ /**
|
|
+ * Attempts to lower the priority to the priority level specified.
|
|
+ *
|
|
+ * @param priority Priority specified
|
|
+ *
|
|
+ * @throws IllegalArgumentException If the priority is invalid
|
|
+ * @return {@code false} if the current task is completing, {@code true} if the priority was lowered to the
|
|
+ * specified level or was already at the specified level or lower.
|
|
+ */
|
|
+ public boolean lowerPriority(final Priority priority);
|
|
+
|
|
+ /**
|
|
+ * Returns the suborder id associated with this task.
|
|
+ * @return The suborder id associated with this task.
|
|
+ */
|
|
+ public long getSubOrder();
|
|
+
|
|
+ /**
|
|
+ * Sets the suborder id associated with this task. This function has no effect when this task
|
|
+ * is completing or is completed.
|
|
+ *
|
|
+ * @param subOrder Specified new sub order.
|
|
+ *
|
|
+ * @return {@code true} if successful, {@code false} if this task is completing or has completed or the queue
|
|
+ * this task was scheduled on was shutdown, or if the current suborder is the same as the new sub order.
|
|
+ */
|
|
+ public boolean setSubOrder(final long subOrder);
|
|
+
|
|
+ /**
|
|
+ * Attempts to raise the suborder to the suborder specified.
|
|
+ *
|
|
+ * @param subOrder Specified new sub order.
|
|
+ *
|
|
+ * @return {@code false} if the current task is completing, {@code true} if the suborder was raised to the
|
|
+ * specified suborder or was already at the specified suborder or higher.
|
|
+ */
|
|
+ public boolean raiseSubOrder(final long subOrder);
|
|
+
|
|
+ /**
|
|
+ * Attempts to lower the suborder to the suborder specified.
|
|
+ *
|
|
+ * @param subOrder Specified new sub order.
|
|
+ *
|
|
+ * @return {@code false} if the current task is completing, {@code true} if the suborder was lowered to the
|
|
+ * specified suborder or was already at the specified suborder or lower.
|
|
+ */
|
|
+ public boolean lowerSubOrder(final long subOrder);
|
|
+
|
|
+ /**
|
|
+ * Sets the priority and suborder id associated with this task. This function has no effect when this task
|
|
+ * is completing or is completed.
|
|
+ *
|
|
+ * @param priority Priority specified
|
|
+ * @param subOrder Specified new sub order.
|
|
+ * @return {@code true} if successful, {@code false} if this task is completing or has completed or the queue
|
|
+ * this task was scheduled on was shutdown, or if the current priority and suborder are the same as
|
|
+ * the parameters.
|
|
+ */
|
|
+ public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder);
|
|
+ }
|
|
+}
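A sketch of the create/queue split and the priority controls defined above, using the PrioritisedTaskQueue implementation added later in this patch:

import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
import ca.spottedleaf.concurrentutil.executor.queue.PrioritisedTaskQueue;
import ca.spottedleaf.concurrentutil.util.Priority;

public final class TaskLifecycleExample {
    public static void main(final String[] args) {
        final PrioritisedTaskQueue queue = new PrioritisedTaskQueue();

        // createTask() only builds the task; it is not visible to workers yet.
        final PrioritisedExecutor.PrioritisedTask task =
                queue.createTask(() -> System.out.println("ran"), Priority.NORMAL);

        // The priority can still be adjusted while the task is lazily created or queued.
        task.raisePriority(Priority.HIGH);

        // queue() publishes it; queueTask() would have done both steps at once.
        task.queue();

        // A worker would normally drive this call.
        queue.executeTask();
    }
}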
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/queue/PrioritisedTaskQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/queue/PrioritisedTaskQueue.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..edb8c6611bdc9aced2714b963e00bbb7829603d2
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/queue/PrioritisedTaskQueue.java
|
|
@@ -0,0 +1,454 @@
|
|
+package ca.spottedleaf.concurrentutil.executor.queue;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Priority;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.Comparator;
|
|
+import java.util.Map;
|
|
+import java.util.concurrent.ConcurrentSkipListMap;
|
|
+import java.util.concurrent.atomic.AtomicBoolean;
|
|
+import java.util.concurrent.atomic.AtomicLong;
|
|
+
|
|
+public final class PrioritisedTaskQueue implements PrioritisedExecutor {
|
|
+
|
|
+ /**
|
|
+ * Required for tie-breaking in the queue
|
|
+ */
|
|
+ private final AtomicLong taskIdGenerator = new AtomicLong();
|
|
+ private final AtomicLong scheduledTasks = new AtomicLong();
|
|
+ private final AtomicLong executedTasks = new AtomicLong();
|
|
+ private final AtomicLong subOrderGenerator = new AtomicLong();
|
|
+ private final AtomicBoolean shutdown = new AtomicBoolean();
|
|
+ private final ConcurrentSkipListMap<PrioritisedQueuedTask.Holder, Boolean> tasks = new ConcurrentSkipListMap<>(PrioritisedQueuedTask.COMPARATOR);
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksScheduled() {
|
|
+ return this.scheduledTasks.get();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksExecuted() {
|
|
+ return this.executedTasks.get();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long generateNextSubOrder() {
|
|
+ return this.subOrderGenerator.getAndIncrement();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean shutdown() {
|
|
+ return !this.shutdown.getAndSet(true);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isShutdown() {
|
|
+ return this.shutdown.get();
|
|
+ }
|
|
+
|
|
+ public PrioritisedTask peekFirst() {
|
|
+ final Map.Entry<PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.firstEntry();
|
|
+ return firstEntry == null ? null : firstEntry.getKey().task;
|
|
+ }
|
|
+
|
|
+ public Priority getHighestPriority() {
|
|
+ final Map.Entry<PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.firstEntry();
|
|
+ return firstEntry == null ? null : Priority.getPriority(firstEntry.getKey().priority);
|
|
+ }
|
|
+
|
|
+ public boolean hasNoScheduledTasks() {
|
|
+ final long executedTasks = this.executedTasks.get();
|
|
+ final long scheduledTasks = this.scheduledTasks.get();
|
|
+
|
|
+ return executedTasks == scheduledTasks;
|
|
+ }
|
|
+
|
|
+ public PrioritySubOrderPair getHighestPrioritySubOrder() {
|
|
+ final Map.Entry<PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.firstEntry();
|
|
+ if (firstEntry == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final PrioritisedQueuedTask.Holder holder = firstEntry.getKey();
|
|
+
|
|
+ return new PrioritySubOrderPair(Priority.getPriority(holder.priority), holder.subOrder);
|
|
+ }
|
|
+
|
|
+ public Runnable pollTask() {
|
|
+ for (;;) {
|
|
+ final Map.Entry<PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.pollFirstEntry();
|
|
+ if (firstEntry != null) {
|
|
+ final PrioritisedQueuedTask.Holder task = firstEntry.getKey();
|
|
+ task.markRemoved();
|
|
+ if (!task.task.cancel()) {
|
|
+ continue;
|
|
+ }
|
|
+ return task.task.execute;
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean executeTask() {
|
|
+ for (;;) {
|
|
+ final Map.Entry<PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.pollFirstEntry();
|
|
+ if (firstEntry != null) {
|
|
+ final PrioritisedQueuedTask.Holder task = firstEntry.getKey();
|
|
+ task.markRemoved();
|
|
+ if (!task.task.execute()) {
|
|
+ continue;
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task) {
|
|
+ return this.createTask(task, Priority.NORMAL, this.generateNextSubOrder());
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority) {
|
|
+ return this.createTask(task, priority, this.generateNextSubOrder());
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ return new PrioritisedQueuedTask(task, priority, subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task) {
|
|
+ return this.queueTask(task, Priority.NORMAL, this.generateNextSubOrder());
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority) {
|
|
+ return this.queueTask(task, priority, this.generateNextSubOrder());
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ final PrioritisedQueuedTask ret = new PrioritisedQueuedTask(task, priority, subOrder);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ private final class PrioritisedQueuedTask implements PrioritisedExecutor.PrioritisedTask {
|
|
+ public static final Comparator<PrioritisedQueuedTask.Holder> COMPARATOR = (final PrioritisedQueuedTask.Holder t1, final PrioritisedQueuedTask.Holder t2) -> {
|
|
+ final int priorityCompare = t1.priority - t2.priority;
|
|
+ if (priorityCompare != 0) {
|
|
+ return priorityCompare;
|
|
+ }
|
|
+
|
|
+ final int subOrderCompare = Long.compare(t1.subOrder, t2.subOrder);
|
|
+ if (subOrderCompare != 0) {
|
|
+ return subOrderCompare;
|
|
+ }
|
|
+
|
|
+ return Long.compare(t1.id, t2.id);
|
|
+ };
|
|
+
|
|
+ private static final class Holder {
|
|
+ private final PrioritisedQueuedTask task;
|
|
+ private final int priority;
|
|
+ private final long subOrder;
|
|
+ private final long id;
|
|
+
|
|
+ private volatile boolean removed;
|
|
+ private static final VarHandle REMOVED_HANDLE = ConcurrentUtil.getVarHandle(Holder.class, "removed", boolean.class);
|
|
+
|
|
+ private Holder(final PrioritisedQueuedTask task, final int priority, final long subOrder,
|
|
+ final long id) {
|
|
+ this.task = task;
|
|
+ this.priority = priority;
|
|
+ this.subOrder = subOrder;
|
|
+ this.id = id;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns true if marked as removed
|
|
+ */
|
|
+ public boolean markRemoved() {
|
|
+ return !(boolean)REMOVED_HANDLE.getAndSet((Holder)this, (boolean)true);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private final long id;
|
|
+ private final Runnable execute;
|
|
+
|
|
+ private Priority priority;
|
|
+ private long subOrder;
|
|
+ private Holder holder;
|
|
+
|
|
+ public PrioritisedQueuedTask(final Runnable execute, final Priority priority, final long subOrder) {
|
|
+ if (!Priority.isValidPriority(priority)) {
|
|
+ throw new IllegalArgumentException("Invalid priority " + priority);
|
|
+ }
|
|
+
|
|
+ this.execute = execute;
|
|
+ this.priority = priority;
|
|
+ this.subOrder = subOrder;
|
|
+ this.id = PrioritisedTaskQueue.this.taskIdGenerator.getAndIncrement();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedExecutor getExecutor() {
|
|
+ return PrioritisedTaskQueue.this;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean queue() {
|
|
+ synchronized (this) {
|
|
+ if (this.holder != null || this.priority == Priority.COMPLETING) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (PrioritisedTaskQueue.this.isShutdown()) {
|
|
+ throw new IllegalStateException("Queue is shutdown");
|
|
+ }
|
|
+
|
|
+ final Holder holder = new Holder(this, this.priority.priority, this.subOrder, this.id);
|
|
+ this.holder = holder;
|
|
+
|
|
+ PrioritisedTaskQueue.this.scheduledTasks.getAndIncrement();
|
|
+ PrioritisedTaskQueue.this.tasks.put(holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ if (PrioritisedTaskQueue.this.isShutdown()) {
|
|
+ this.cancel();
|
|
+ throw new IllegalStateException("Queue is shutdown");
|
|
+ }
|
|
+
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isQueued() {
|
|
+ synchronized (this) {
|
|
+ return this.holder != null && this.priority != Priority.COMPLETING;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean cancel() {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = Priority.COMPLETING;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ PrioritisedTaskQueue.this.executedTasks.getAndIncrement();
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean execute() {
|
|
+ final boolean increaseExecuted;
|
|
+
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = Priority.COMPLETING;
|
|
+
|
|
+ if (increaseExecuted = (this.holder != null)) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ this.execute.run();
|
|
+ return true;
|
|
+ } finally {
|
|
+ if (increaseExecuted) {
|
|
+ PrioritisedTaskQueue.this.executedTasks.getAndIncrement();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Priority getPriority() {
|
|
+ synchronized (this) {
|
|
+ return this.priority;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriority(final Priority priority) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.priority == priority) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = priority;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raisePriority(final Priority priority) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.priority.isHigherOrEqualPriority(priority)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = priority;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerPriority(Priority priority) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.priority.isLowerOrEqualPriority(priority)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = priority;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getSubOrder() {
|
|
+ synchronized (this) {
|
|
+ return this.subOrder;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setSubOrder(final long subOrder) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.subOrder == subOrder) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.subOrder = subOrder;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raiseSubOrder(long subOrder) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.subOrder >= subOrder) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.subOrder = subOrder;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerSubOrder(final long subOrder) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || this.subOrder <= subOrder) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.subOrder = subOrder;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder) {
|
|
+ synchronized (this) {
|
|
+ if (this.priority == Priority.COMPLETING || (this.priority == priority && this.subOrder == subOrder)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.priority = priority;
|
|
+ this.subOrder = subOrder;
|
|
+
|
|
+ if (this.holder != null) {
|
|
+ if (this.holder.markRemoved()) {
|
|
+ PrioritisedTaskQueue.this.tasks.remove(this.holder);
|
|
+ }
|
|
+ this.holder = new Holder(this, priority.priority, this.subOrder, this.id);
|
|
+ PrioritisedTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static record PrioritySubOrderPair(Priority priority, long subOrder) {}
|
|
+}
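A brief sketch of how this queue orders work: tasks drain by priority first (BLOCKING before NORMAL before IDLE, per the executeTask() contract), then by suborder, then by insertion id. The names below are from this patch; the expected order is noted in comments:

import ca.spottedleaf.concurrentutil.executor.queue.PrioritisedTaskQueue;
import ca.spottedleaf.concurrentutil.util.Priority;

public final class QueueOrderingExample {
    public static void main(final String[] args) {
        final PrioritisedTaskQueue queue = new PrioritisedTaskQueue();

        queue.queueTask(() -> System.out.println("normal"), Priority.NORMAL);
        queue.queueTask(() -> System.out.println("blocking"), Priority.BLOCKING);
        queue.queueTask(() -> System.out.println("idle"), Priority.IDLE);

        // Drains in priority order: "blocking", then "normal", then "idle".
        while (queue.executeTask()) {
            // keep executing until the queue reports no task was run
        }

        // Further queueTask() calls now throw IllegalStateException.
        queue.shutdown();
    }
}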
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedQueueExecutorThread.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedQueueExecutorThread.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..f5367a13aaa02f0f929813c00a67e6ac7c8652cb
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedQueueExecutorThread.java
|
|
@@ -0,0 +1,402 @@
|
|
+package ca.spottedleaf.concurrentutil.executor.thread;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Priority;
|
|
+import org.slf4j.Logger;
|
|
+import org.slf4j.LoggerFactory;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.concurrent.locks.LockSupport;
|
|
+
|
|
+/**
|
|
+ * Thread which will continuously drain from a specified queue.
|
|
+ * <p>
|
|
+ * Note: When using this thread, queue additions to the underlying {@link #queue} are not sufficient to get this thread
|
|
+ * to execute the task. The function {@link #notifyTasks()} must be used after scheduling a task. For expected behaviour
|
|
+ * of task scheduling, use the methods provided on this class to schedule tasks.
|
|
+ * </p>
|
|
+ */
|
|
+public class PrioritisedQueueExecutorThread extends Thread implements PrioritisedExecutor {
|
|
+
|
|
+ private static final Logger LOGGER = LoggerFactory.getLogger(PrioritisedQueueExecutorThread.class);
|
|
+
|
|
+ protected final PrioritisedExecutor queue;
|
|
+
|
|
+ protected volatile boolean threadShutdown;
|
|
+
|
|
+ protected volatile boolean threadParked;
|
|
+ protected static final VarHandle THREAD_PARKED_HANDLE = ConcurrentUtil.getVarHandle(PrioritisedQueueExecutorThread.class, "threadParked", boolean.class);
|
|
+
|
|
+ protected volatile boolean halted;
|
|
+
|
|
+ protected final long spinWaitTime;
|
|
+
|
|
+ protected static final long DEFAULT_SPINWAIT_TIME = (long)(0.1e6);// 0.1ms
|
|
+
|
|
+ public PrioritisedQueueExecutorThread(final PrioritisedExecutor queue) {
|
|
+ this(queue, DEFAULT_SPINWAIT_TIME); // 0.1ms
|
|
+ }
|
|
+
|
|
+ public PrioritisedQueueExecutorThread(final PrioritisedExecutor queue, final long spinWaitTime) { // in ns
|
|
+ this.queue = queue;
|
|
+ this.spinWaitTime = spinWaitTime;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public final void run() {
|
|
+ try {
|
|
+ this.begin();
|
|
+ this.doRun();
|
|
+ } finally {
|
|
+ this.die();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public final void doRun() {
|
|
+ final long spinWaitTime = this.spinWaitTime;
|
|
+
|
|
+ main_loop:
|
|
+ for (;;) {
|
|
+ this.pollTasks();
|
|
+
|
|
+ // spinwait
|
|
+
|
|
+ final long start = System.nanoTime();
|
|
+
|
|
+ for (;;) {
|
|
+ // If we are interrupted for any reason, park() will always return immediately. Clear so that we don't needlessly use cpu in such an event.
|
|
+ Thread.interrupted();
|
|
+ Thread.yield();
|
|
+ LockSupport.parkNanos("Spinwaiting on tasks", 10_000L); // 10us
|
|
+
|
|
+ if (this.pollTasks()) {
|
|
+ // restart loop, found tasks
|
|
+ continue main_loop;
|
|
+ }
|
|
+
|
|
+ if (this.handleClose()) {
|
|
+ return; // we're done
|
|
+ }
|
|
+
|
|
+ if ((System.nanoTime() - start) >= spinWaitTime) {
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (this.handleClose()) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ this.setThreadParkedVolatile(true);
|
|
+
|
|
+ // We need to poll here to avoid a race condition where a thread queues a task before we set parked to true
|
|
+ // (i.e. it will not notify us)
|
|
+ if (this.pollTasks()) {
|
|
+ this.setThreadParkedVolatile(false);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (this.handleClose()) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ // we don't need to check parked before sleeping, but we do need to check parked in a do-while loop
|
|
+ // LockSupport.park() can fail for any reason
|
|
+ while (this.getThreadParkedVolatile()) {
|
|
+ Thread.interrupted();
|
|
+ LockSupport.park("Waiting on tasks");
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected void begin() {}
|
|
+
|
|
+ protected void die() {}
|
|
+
|
|
+ /**
|
|
+ * Attempts to poll as many tasks as possible, returning when finished.
|
|
+ * @return Whether any tasks were executed.
|
|
+ */
|
|
+ protected boolean pollTasks() {
|
|
+ boolean ret = false;
|
|
+
|
|
+ for (;;) {
|
|
+ if (this.halted) {
|
|
+ break;
|
|
+ }
|
|
+ try {
|
|
+ if (!this.queue.executeTask()) {
|
|
+ break;
|
|
+ }
|
|
+ ret = true;
|
|
+ } catch (final Throwable throwable) {
|
|
+ LOGGER.error("Exception thrown from prioritized runnable task in thread '" + this.getName() + "'", throwable);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ protected boolean handleClose() {
|
|
+ if (this.threadShutdown) {
|
|
+ this.pollTasks(); // this ensures we've emptied the queue
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Notify this thread that a task has been added to its queue
|
|
+ * @return {@code true} if this thread was waiting for tasks, {@code false} if it is executing tasks
|
|
+ */
|
|
+ public boolean notifyTasks() {
|
|
+ if (this.getThreadParkedVolatile() && this.exchangeThreadParkedVolatile(false)) {
|
|
+ LockSupport.unpark(this);
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksExecuted() {
|
|
+ return this.queue.getTotalTasksExecuted();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksScheduled() {
|
|
+ return this.queue.getTotalTasksScheduled();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long generateNextSubOrder() {
|
|
+ return this.queue.generateNextSubOrder();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean shutdown() {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isShutdown() {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ * @throws IllegalStateException Always
|
|
+ */
|
|
+ @Override
|
|
+ public boolean executeTask() throws IllegalStateException {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task) {
|
|
+ final PrioritisedTask ret = this.createTask(task);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority) {
|
|
+ final PrioritisedTask ret = this.createTask(task, priority);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ final PrioritisedTask ret = this.createTask(task, priority, subOrder);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(Runnable task) {
|
|
+ final PrioritisedTask queueTask = this.queue.createTask(task);
|
|
+
|
|
+ return new WrappedTask(queueTask);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority) {
|
|
+ final PrioritisedTask queueTask = this.queue.createTask(task, priority);
|
|
+
|
|
+ return new WrappedTask(queueTask);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ final PrioritisedTask queueTask = this.queue.createTask(task, priority, subOrder);
|
|
+
|
|
+ return new WrappedTask(queueTask);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Closes this queue executor's queue. Optionally waits for all tasks in queue to be executed if {@code wait} is true.
|
|
+ * <p>
|
|
+ * This function is MT-Safe.
|
|
+ * </p>
|
|
+ * @param wait If this call is to wait until this thread shuts down.
|
|
+ * @param killQueue Whether to shutdown this thread's queue
|
|
+ * @return whether this thread shut down the queue
|
|
+ * @see #halt(boolean)
|
|
+ */
|
|
+ public boolean close(final boolean wait, final boolean killQueue) {
|
|
+ final boolean ret = killQueue && this.queue.shutdown();
|
|
+ this.threadShutdown = true;
|
|
+
|
|
+ // force thread to respond to the shutdown
|
|
+ this.setThreadParkedVolatile(false);
|
|
+ LockSupport.unpark(this);
|
|
+
|
|
+ if (wait) {
|
|
+ boolean interrupted = false;
|
|
+ for (;;) {
|
|
+ if (!this.isAlive()) {
|
|
+ if (interrupted) {
|
|
+ Thread.currentThread().interrupt();
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ try {
|
|
+ this.join();
|
|
+ } catch (final InterruptedException ex) {
|
|
+ interrupted = true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+
|
|
+ /**
|
|
+ * Causes this thread to exit without draining the queue. To ensure tasks are completed, use {@link #close(boolean, boolean)}.
|
|
+ * <p>
|
|
+ * This is not safe to call with {@link #close(boolean, boolean)} if {@code wait = true}, in which case
|
|
+ * the waiting thread may block indefinitely.
|
|
+ * </p>
|
|
+ * <p>
|
|
+ * This function is MT-Safe.
|
|
+ * </p>
|
|
+ * @param killQueue Whether to shutdown this thread's queue
|
|
+ * @see #close(boolean, boolean)
|
|
+ */
|
|
+ public void halt(final boolean killQueue) {
|
|
+ if (killQueue) {
|
|
+ this.queue.shutdown();
|
|
+ }
|
|
+ this.threadShutdown = true;
|
|
+ this.halted = true;
|
|
+
|
|
+ // force thread to respond to the shutdown
|
|
+ this.setThreadParkedVolatile(false);
|
|
+ LockSupport.unpark(this);
|
|
+ }
|
|
+
|
|
+ protected final boolean getThreadParkedVolatile() {
|
|
+ return (boolean)THREAD_PARKED_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ protected final boolean exchangeThreadParkedVolatile(final boolean value) {
|
|
+ return (boolean)THREAD_PARKED_HANDLE.getAndSet(this, value);
|
|
+ }
|
|
+
|
|
+ protected final void setThreadParkedVolatile(final boolean value) {
|
|
+ THREAD_PARKED_HANDLE.setVolatile(this, value);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Required so that queue() can notify (unpark) this thread
|
|
+ */
|
|
+ private final class WrappedTask implements PrioritisedTask {
|
|
+ private final PrioritisedTask queueTask;
|
|
+
|
|
+ public WrappedTask(final PrioritisedTask queueTask) {
|
|
+ this.queueTask = queueTask;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedExecutor getExecutor() {
|
|
+ return PrioritisedQueueExecutorThread.this;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean queue() {
|
|
+ final boolean ret = this.queueTask.queue();
|
|
+ if (ret) {
|
|
+ PrioritisedQueueExecutorThread.this.notifyTasks();
|
|
+ }
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isQueued() {
|
|
+ return this.queueTask.isQueued();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean cancel() {
|
|
+ return this.queueTask.cancel();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean execute() {
|
|
+ return this.queueTask.execute();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Priority getPriority() {
|
|
+ return this.queueTask.getPriority();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriority(final Priority priority) {
|
|
+ return this.queueTask.setPriority(priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raisePriority(final Priority priority) {
|
|
+ return this.queueTask.raisePriority(priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerPriority(final Priority priority) {
|
|
+ return this.queueTask.lowerPriority(priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getSubOrder() {
|
|
+ return this.queueTask.getSubOrder();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setSubOrder(final long subOrder) {
|
|
+ return this.queueTask.setSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raiseSubOrder(final long subOrder) {
|
|
+ return this.queueTask.raiseSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerSubOrder(final long subOrder) {
|
|
+ return this.queueTask.lowerSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder) {
|
|
+ return this.queueTask.setPriorityAndSubOrder(priority, subOrder);
|
|
+ }
|
|
+ }
|
|
+}
|
|
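A minimal usage sketch of the executor thread above, for illustration only. The single-argument constructor taking the backing queue is inferred from the super(null) call in the pool worker subclass later in this patch, and the backing queue is assumed to be the PrioritisedTaskQueue used elsewhere in this patch; the remaining calls are visible above.

    // constructor signature assumed, see note above
    PrioritisedQueueExecutorThread worker = new PrioritisedQueueExecutorThread(new PrioritisedTaskQueue());
    worker.start();

    // createTask(Runnable, Priority, long) is shown above; queue() on the returned wrapper unparks the worker
    worker.createTask(() -> System.out.println("ran on worker"), Priority.NORMAL, 0L).queue();

    // wait for queued tasks to finish, then shut the backing queue down
    worker.close(true, true);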
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedThreadPool.java b/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedThreadPool.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..cb9df914a9a6d0d3f58fa58d8c93f4f583416cd1
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/executor/thread/PrioritisedThreadPool.java
|
|
@@ -0,0 +1,741 @@
|
|
+package ca.spottedleaf.concurrentutil.executor.thread;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
|
|
+import ca.spottedleaf.concurrentutil.executor.queue.PrioritisedTaskQueue;
|
|
+import ca.spottedleaf.concurrentutil.util.Priority;
|
|
+import ca.spottedleaf.concurrentutil.util.TimeUtil;
|
|
+import org.slf4j.Logger;
|
|
+import org.slf4j.LoggerFactory;
|
|
+import java.lang.reflect.Array;
|
|
+import java.util.Arrays;
|
|
+import java.util.concurrent.atomic.AtomicBoolean;
|
|
+import java.util.concurrent.atomic.AtomicLong;
|
|
+import java.util.function.Consumer;
|
|
+
|
|
+public final class PrioritisedThreadPool {
|
|
+
|
|
+ private static final Logger LOGGER = LoggerFactory.getLogger(PrioritisedThreadPool.class);
|
|
+
|
|
+ private final Consumer<Thread> threadModifier;
|
|
+ private final COWArrayList<ExecutorGroup> executors = new COWArrayList<>(ExecutorGroup.class);
|
|
+ private final COWArrayList<PrioritisedThread> threads = new COWArrayList<>(PrioritisedThread.class);
|
|
+ private final COWArrayList<PrioritisedThread> aliveThreads = new COWArrayList<>(PrioritisedThread.class);
|
|
+
|
|
+ private static final Priority HIGH_PRIORITY_NOTIFY_THRESHOLD = Priority.HIGH;
|
|
+ private static final Priority QUEUE_SHUTDOWN_PRIORITY = Priority.HIGH;
|
|
+
|
|
+ private boolean shutdown;
|
|
+
|
|
+ public PrioritisedThreadPool(final Consumer<Thread> threadModifier) {
|
|
+ this.threadModifier = threadModifier;
|
|
+
|
|
+ if (threadModifier == null) {
|
|
+ throw new NullPointerException("Thread factory may not be null");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Thread[] getAliveThreads() {
|
|
+ final PrioritisedThread[] threads = this.aliveThreads.getArray();
|
|
+
|
|
+ return Arrays.copyOf(threads, threads.length, Thread[].class);
|
|
+ }
|
|
+
|
|
+ public Thread[] getCoreThreads() {
|
|
+ final PrioritisedThread[] threads = this.threads.getArray();
|
|
+
|
|
+ return Arrays.copyOf(threads, threads.length, Thread[].class);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Prevents creation of new queues and shuts down all non-shutdown queues if specified.
|
|
+ */
|
|
+ public void halt(final boolean shutdownQueues) {
|
|
+ synchronized (this) {
|
|
+ this.shutdown = true;
|
|
+ }
|
|
+
|
|
+ if (shutdownQueues) {
|
|
+ for (final ExecutorGroup group : this.executors.getArray()) {
|
|
+ for (final ExecutorGroup.ThreadPoolExecutor executor : group.executors.getArray()) {
|
|
+ executor.shutdown();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (final PrioritisedThread thread : this.threads.getArray()) {
|
|
+ thread.halt(false);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Waits until all threads in this pool have shut down, or until the specified time has passed.
|
|
+ * @param msToWait Maximum time to wait, in milliseconds.
|
|
+ * @return {@code false} if the maximum time passed, {@code true} otherwise.
|
|
+ */
|
|
+ public boolean join(final long msToWait) {
|
|
+ try {
|
|
+ return this.join(msToWait, false);
|
|
+ } catch (final InterruptedException ex) {
|
|
+ throw new IllegalStateException(ex);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Waits until all threads in this pool have shut down, or until the specified time has passed.
|
|
+ * @param msToWait Maximum time to wait, in milliseconds.
|
|
+ * @return {@code false} if the maximum time passed, {@code true} otherwise.
|
|
+ * @throws InterruptedException If this thread is interrupted.
|
|
+ */
|
|
+ public boolean joinInterruptable(final long msToWait) throws InterruptedException {
|
|
+ return this.join(msToWait, true);
|
|
+ }
|
|
+
|
|
+ protected final boolean join(final long msToWait, final boolean interruptable) throws InterruptedException {
|
|
+ final long nsToWait = msToWait * (1000 * 1000);
|
|
+ final long start = System.nanoTime();
|
|
+ final long deadline = start + nsToWait;
|
|
+ boolean interrupted = false;
|
|
+ try {
|
|
+ for (final PrioritisedThread thread : this.aliveThreads.getArray()) {
|
|
+ for (;;) {
|
|
+ if (!thread.isAlive()) {
|
|
+ break;
|
|
+ }
|
|
+ final long current = System.nanoTime();
|
|
+ if (current >= deadline && msToWait > 0L) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ thread.join(msToWait <= 0L ? 0L : Math.max(1L, (deadline - current) / (1000 * 1000)));
|
|
+ } catch (final InterruptedException ex) {
|
|
+ if (interruptable) {
|
|
+ throw ex;
|
|
+ }
|
|
+ interrupted = true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ } finally {
|
|
+ if (interrupted) {
|
|
+ Thread.currentThread().interrupt();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Shuts down this thread pool, optionally waiting for all tasks to be executed.
|
|
+ * This function will invoke {@link PrioritisedExecutor#shutdown()} on all created executors on this
|
|
+ * thread pool.
|
|
+ * @param wait Whether to wait for tasks to be executed
|
|
+ */
|
|
+ public void shutdown(final boolean wait) {
|
|
+ synchronized (this) {
|
|
+ this.shutdown = true;
|
|
+ }
|
|
+
|
|
+ for (final ExecutorGroup group : this.executors.getArray()) {
|
|
+ for (final ExecutorGroup.ThreadPoolExecutor executor : group.executors.getArray()) {
|
|
+ executor.shutdown();
|
|
+ }
|
|
+ }
|
|
+
|
|
+
|
|
+ for (final PrioritisedThread thread : this.threads.getArray()) {
|
|
+ // none of these can be true or else NPE
|
|
+ thread.close(false, false);
|
|
+ }
|
|
+
|
|
+ if (wait) {
|
|
+ this.join(0L);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void die(final PrioritisedThread thread) {
|
|
+ this.aliveThreads.remove(thread);
|
|
+ }
|
|
+
|
|
+ public void adjustThreadCount(final int threads) {
|
|
+ synchronized (this) {
|
|
+ if (this.shutdown) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final PrioritisedThread[] currentThreads = this.threads.getArray();
|
|
+ if (threads == currentThreads.length) {
|
|
+ // no adjustment needed
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (threads < currentThreads.length) {
|
|
+ // we need to trim threads
|
|
+ for (int i = 0, difference = currentThreads.length - threads; i < difference; ++i) {
|
|
+ final PrioritisedThread remove = currentThreads[currentThreads.length - i - 1];
|
|
+
|
|
+ remove.halt(false);
|
|
+ this.threads.remove(remove);
|
|
+ }
|
|
+ } else {
|
|
+ // we need to add threads
|
|
+ for (int i = 0, difference = threads - currentThreads.length; i < difference; ++i) {
|
|
+ final PrioritisedThread thread = new PrioritisedThread();
|
|
+
|
|
+ this.threadModifier.accept(thread);
|
|
+ this.aliveThreads.add(thread);
|
|
+ this.threads.add(thread);
|
|
+
|
|
+ thread.start();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static int compareInsideGroup(final ExecutorGroup.ThreadPoolExecutor src, final Priority srcPriority,
|
|
+ final ExecutorGroup.ThreadPoolExecutor dst, final Priority dstPriority) {
|
|
+ final int priorityCompare = srcPriority.ordinal() - dstPriority.ordinal();
|
|
+ if (priorityCompare != 0) {
|
|
+ return priorityCompare;
|
|
+ }
|
|
+
|
|
+ final int parallelismCompare = src.currentParallelism - dst.currentParallelism;
|
|
+ if (parallelismCompare != 0) {
|
|
+ return parallelismCompare;
|
|
+ }
|
|
+
|
|
+ return TimeUtil.compareTimes(src.lastRetrieved, dst.lastRetrieved);
|
|
+ }
|
|
+
|
|
+ private static int compareOutsideGroup(final ExecutorGroup.ThreadPoolExecutor src, final Priority srcPriority,
|
|
+ final ExecutorGroup.ThreadPoolExecutor dst, final Priority dstPriority) {
|
|
+ if (src.getGroup().division == dst.getGroup().division) {
|
|
+ // can only compare priorities inside the same division
|
|
+ final int priorityCompare = srcPriority.ordinal() - dstPriority.ordinal();
|
|
+ if (priorityCompare != 0) {
|
|
+ return priorityCompare;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ final int parallelismCompare = src.getGroup().currentParallelism - dst.getGroup().currentParallelism;
|
|
+ if (parallelismCompare != 0) {
|
|
+ return parallelismCompare;
|
|
+ }
|
|
+
|
|
+ return TimeUtil.compareTimes(src.lastRetrieved, dst.lastRetrieved);
|
|
+ }
|
|
+
|
|
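+ /**
+ * Selects the next queue for a worker thread to drain. Within each group, the executor with the
+ * highest-priority eligible work that is under its parallelism limit is chosen; across groups, priority is
+ * only compared within the same division, then the group with the lower current parallelism wins, with ties
+ * broken by the least recently retrieved queue. On success the parallelism counters are incremented, so the
+ * caller must later hand the queue back via {@link #returnQueue(ExecutorGroup.ThreadPoolExecutor)}.
+ * Returns {@code null} if no executor currently has eligible work.
+ */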
+ private ExecutorGroup.ThreadPoolExecutor obtainQueue() {
|
|
+ final long time = System.nanoTime();
|
|
+ synchronized (this) {
|
|
+ ExecutorGroup.ThreadPoolExecutor ret = null;
|
|
+ Priority retPriority = null;
|
|
+
|
|
+ for (final ExecutorGroup executorGroup : this.executors.getArray()) {
|
|
+ ExecutorGroup.ThreadPoolExecutor highest = null;
|
|
+ Priority highestPriority = null;
|
|
+ for (final ExecutorGroup.ThreadPoolExecutor executor : executorGroup.executors.getArray()) {
|
|
+ final int maxParallelism = executor.maxParallelism;
|
|
+ if (maxParallelism > 0 && executor.currentParallelism >= maxParallelism) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final Priority priority = executor.getTargetPriority();
|
|
+
|
|
+ if (priority == null) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (highestPriority == null || compareInsideGroup(highest, highestPriority, executor, priority) > 0) {
|
|
+ highest = executor;
|
|
+ highestPriority = priority;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (highest == null) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (ret == null || compareOutsideGroup(ret, retPriority, highest, highestPriority) > 0) {
|
|
+ ret = highest;
|
|
+ retPriority = highestPriority;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ret != null) {
|
|
+ ret.lastRetrieved = time;
|
|
+ ++ret.currentParallelism;
|
|
+ ++ret.getGroup().currentParallelism;
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void returnQueue(final ExecutorGroup.ThreadPoolExecutor executor) {
|
|
+ synchronized (this) {
|
|
+ --executor.currentParallelism;
|
|
+ --executor.getGroup().currentParallelism;
|
|
+ }
|
|
+
|
|
+ if (executor.isShutdown() && executor.queue.hasNoScheduledTasks()) {
|
|
+ executor.getGroup().executors.remove(executor);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void notifyAllThreads() {
|
|
+ for (final PrioritisedThread thread : this.threads.getArray()) {
|
|
+ thread.notifyTasks();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public ExecutorGroup createExecutorGroup(final int division, final int flags) {
|
|
+ synchronized (this) {
|
|
+ if (this.shutdown) {
|
|
+ throw new IllegalStateException("Queue is shutdown: " + this.toString());
|
|
+ }
|
|
+
|
|
+ final ExecutorGroup ret = new ExecutorGroup(division, flags);
|
|
+
|
|
+ this.executors.add(ret);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private final class PrioritisedThread extends PrioritisedQueueExecutorThread {
|
|
+
|
|
+ private final AtomicBoolean alertedHighPriority = new AtomicBoolean();
|
|
+
|
|
+ public PrioritisedThread() {
|
|
+ super(null);
|
|
+ }
|
|
+
|
|
+ public boolean alertHighPriorityExecutor() {
|
|
+ if (!this.notifyTasks()) {
|
|
+ if (!this.alertedHighPriority.get()) {
|
|
+ this.alertedHighPriority.set(true);
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ private boolean isAlertedHighPriority() {
|
|
+ return this.alertedHighPriority.get() && this.alertedHighPriority.getAndSet(false);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected void die() {
|
|
+ PrioritisedThreadPool.this.die(this);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected boolean pollTasks() {
|
|
+ boolean ret = false;
|
|
+
|
|
+ for (;;) {
|
|
+ if (this.halted) {
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ final ExecutorGroup.ThreadPoolExecutor executor = PrioritisedThreadPool.this.obtainQueue();
|
|
+ if (executor == null) {
|
|
+ break;
|
|
+ }
|
|
+ final long deadline = System.nanoTime() + executor.queueMaxHoldTime;
|
|
+ do {
|
|
+ try {
|
|
+ if (this.halted || executor.halt) {
|
|
+ break;
|
|
+ }
|
|
+ if (!executor.executeTask()) {
|
|
+ // no more tasks, try next queue
|
|
+ break;
|
|
+ }
|
|
+ ret = true;
|
|
+ } catch (final Throwable throwable) {
|
|
+ LOGGER.error("Exception thrown from thread '" + this.getName() + "' in queue '" + executor.toString() + "'", throwable);
|
|
+ }
|
|
+ } while (!this.isAlertedHighPriority() && System.nanoTime() <= deadline);
|
|
+
|
|
+ PrioritisedThreadPool.this.returnQueue(executor);
|
|
+ }
|
|
+
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public final class ExecutorGroup {
|
|
+
|
|
+ private final AtomicLong subOrderGenerator = new AtomicLong();
|
|
+ private final COWArrayList<ThreadPoolExecutor> executors = new COWArrayList<>(ThreadPoolExecutor.class);
|
|
+
|
|
+ private final int division;
|
|
+ private int currentParallelism;
|
|
+
|
|
+ private ExecutorGroup(final int division, final int flags) {
|
|
+ this.division = division;
|
|
+ }
|
|
+
|
|
+ public ThreadPoolExecutor[] getAllExecutors() {
|
|
+ return this.executors.getArray().clone();
|
|
+ }
|
|
+
|
|
+ private PrioritisedThreadPool getThreadPool() {
|
|
+ return PrioritisedThreadPool.this;
|
|
+ }
|
|
+
|
|
+ public ThreadPoolExecutor createExecutor(final int maxParallelism, final long queueMaxHoldTime, final int flags) {
|
|
+ synchronized (PrioritisedThreadPool.this) {
|
|
+ if (PrioritisedThreadPool.this.shutdown) {
|
|
+ throw new IllegalStateException("Queue is shutdown: " + PrioritisedThreadPool.this.toString());
|
|
+ }
|
|
+
|
|
+ final ThreadPoolExecutor ret = new ThreadPoolExecutor(maxParallelism, queueMaxHoldTime, flags);
|
|
+
|
|
+ this.executors.add(ret);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public final class ThreadPoolExecutor implements PrioritisedExecutor {
|
|
+
|
|
+ private final PrioritisedTaskQueue queue = new PrioritisedTaskQueue();
|
|
+
|
|
+ private volatile int maxParallelism;
|
|
+ private final long queueMaxHoldTime;
|
|
+ private volatile int currentParallelism;
|
|
+ private volatile boolean halt;
|
|
+ private long lastRetrieved = System.nanoTime();
|
|
+
|
|
+ private ThreadPoolExecutor(final int maxParallelism, final long queueMaxHoldTime, final int flags) {
|
|
+ this.maxParallelism = maxParallelism;
|
|
+ this.queueMaxHoldTime = queueMaxHoldTime;
|
|
+ }
|
|
+
|
|
+ private ExecutorGroup getGroup() {
|
|
+ return ExecutorGroup.this;
|
|
+ }
|
|
+
|
|
+ private boolean canNotify() {
|
|
+ if (this.halt) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final int max = this.maxParallelism;
|
|
+ return max < 0 || this.currentParallelism < max;
|
|
+ }
|
|
+
|
|
+ private void notifyHighPriority() {
|
|
+ if (!this.canNotify()) {
|
|
+ return;
|
|
+ }
|
|
+ for (final PrioritisedThread thread : this.getGroup().getThreadPool().threads.getArray()) {
|
|
+ if (thread.alertHighPriorityExecutor()) {
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void notifyScheduled() {
|
|
+ if (!this.canNotify()) {
|
|
+ return;
|
|
+ }
|
|
+ for (final PrioritisedThread thread : this.getGroup().getThreadPool().threads.getArray()) {
|
|
+ if (thread.notifyTasks()) {
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Removes this queue from the thread pool without shutting the queue down or waiting for queued tasks to be executed
|
|
+ */
|
|
+ public void halt() {
|
|
+ this.halt = true;
|
|
+
|
|
+ ExecutorGroup.this.executors.remove(this);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns whether this executor may still execute tasks: if halted, returns whether tasks are currently
|
|
+ * executing; otherwise, returns {@code true} unless this executor is shutdown and has no scheduled tasks remaining.
|
|
+ */
|
|
+ public boolean isActive() {
|
|
+ if (this.halt) {
|
|
+ return this.currentParallelism > 0;
|
|
+ } else {
|
|
+ if (!this.isShutdown()) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return !this.queue.hasNoScheduledTasks();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean shutdown() {
|
|
+ if (!this.queue.shutdown()) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (this.queue.hasNoScheduledTasks()) {
|
|
+ ExecutorGroup.this.executors.remove(this);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isShutdown() {
|
|
+ return this.queue.isShutdown();
|
|
+ }
|
|
+
|
|
+ public void setMaxParallelism(final int maxParallelism) {
|
|
+ this.maxParallelism = maxParallelism;
|
|
+ // assume that we could have increased the parallelism
|
|
+ if (this.getTargetPriority() != null) {
|
|
+ ExecutorGroup.this.getThreadPool().notifyAllThreads();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ Priority getTargetPriority() {
|
|
+ final Priority ret = this.queue.getHighestPriority();
|
|
+ if (!this.isShutdown()) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return ret == null ? QUEUE_SHUTDOWN_PRIORITY : Priority.max(ret, QUEUE_SHUTDOWN_PRIORITY);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksScheduled() {
|
|
+ return this.queue.getTotalTasksScheduled();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getTotalTasksExecuted() {
|
|
+ return this.queue.getTotalTasksExecuted();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long generateNextSubOrder() {
|
|
+ return ExecutorGroup.this.subOrderGenerator.getAndIncrement();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean executeTask() {
|
|
+ return this.queue.executeTask();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task) {
|
|
+ final PrioritisedTask ret = this.createTask(task);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority) {
|
|
+ final PrioritisedTask ret = this.createTask(task, priority);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ final PrioritisedTask ret = this.createTask(task, priority, subOrder);
|
|
+
|
|
+ ret.queue();
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task) {
|
|
+ return this.createTask(task, Priority.NORMAL);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority) {
|
|
+ return this.createTask(task, priority, this.generateNextSubOrder());
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder) {
|
|
+ return new WrappedTask(this.queue.createTask(task, priority, subOrder));
|
|
+ }
|
|
+
|
|
+ private final class WrappedTask implements PrioritisedTask {
|
|
+
|
|
+ private final PrioritisedTask wrapped;
|
|
+
|
|
+ private WrappedTask(final PrioritisedTask wrapped) {
|
|
+ this.wrapped = wrapped;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public PrioritisedExecutor getExecutor() {
|
|
+ return ThreadPoolExecutor.this;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean queue() {
|
|
+ if (this.wrapped.queue()) {
|
|
+ final Priority priority = this.getPriority();
|
|
+ if (priority != Priority.COMPLETING) {
|
|
+ if (priority.isHigherOrEqualPriority(HIGH_PRIORITY_NOTIFY_THRESHOLD)) {
|
|
+ ThreadPoolExecutor.this.notifyHighPriority();
|
|
+ } else {
|
|
+ ThreadPoolExecutor.this.notifyScheduled();
|
|
+ }
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isQueued() {
|
|
+ return this.wrapped.isQueued();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean cancel() {
|
|
+ return this.wrapped.cancel();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean execute() {
|
|
+ return this.wrapped.execute();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Priority getPriority() {
|
|
+ return this.wrapped.getPriority();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriority(final Priority priority) {
|
|
+ if (this.wrapped.setPriority(priority)) {
|
|
+ if (priority.isHigherOrEqualPriority(HIGH_PRIORITY_NOTIFY_THRESHOLD)) {
|
|
+ ThreadPoolExecutor.this.notifyHighPriority();
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raisePriority(final Priority priority) {
|
|
+ if (this.wrapped.raisePriority(priority)) {
|
|
+ if (priority.isHigherOrEqualPriority(HIGH_PRIORITY_NOTIFY_THRESHOLD)) {
|
|
+ ThreadPoolExecutor.this.notifyHighPriority();
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerPriority(final Priority priority) {
|
|
+ return this.wrapped.lowerPriority(priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long getSubOrder() {
|
|
+ return this.wrapped.getSubOrder();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setSubOrder(final long subOrder) {
|
|
+ return this.wrapped.setSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean raiseSubOrder(final long subOrder) {
|
|
+ return this.wrapped.raiseSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean lowerSubOrder(final long subOrder) {
|
|
+ return this.wrapped.lowerSubOrder(subOrder);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder) {
|
|
+ if (this.wrapped.setPriorityAndSubOrder(priority, subOrder)) {
|
|
+ if (priority.isHigherOrEqualPriority(HIGH_PRIORITY_NOTIFY_THRESHOLD)) {
|
|
+ ThreadPoolExecutor.this.notifyHighPriority();
|
|
+ }
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private static final class COWArrayList<E> {
|
|
+
|
|
+ private volatile E[] array;
|
|
+
|
|
+ public COWArrayList(final Class<E> clazz) {
|
|
+ this.array = (E[])Array.newInstance(clazz, 0);
|
|
+ }
|
|
+
|
|
+ public E[] getArray() {
|
|
+ return this.array;
|
|
+ }
|
|
+
|
|
+ public void add(final E element) {
|
|
+ synchronized (this) {
|
|
+ final E[] array = this.array;
|
|
+
|
|
+ final E[] copy = Arrays.copyOf(array, array.length + 1);
|
|
+ copy[array.length] = element;
|
|
+
|
|
+ this.array = copy;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public boolean remove(final E element) {
|
|
+ synchronized (this) {
|
|
+ final E[] array = this.array;
|
|
+ int index = -1;
|
|
+ for (int i = 0, len = array.length; i < len; ++i) {
|
|
+ if (array[i] == element) {
|
|
+ index = i;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (index == -1) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final E[] copy = (E[])Array.newInstance(array.getClass().getComponentType(), array.length - 1);
|
|
+
|
|
+ System.arraycopy(array, 0, copy, 0, index);
|
|
+ System.arraycopy(array, index + 1, copy, index, (array.length - 1) - index);
|
|
+
|
|
+ this.array = copy;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+}
|
|
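A usage sketch for the thread pool above, restricted to calls that appear in this patch. The division and flags arguments are passed as 0 because no named constants are defined here, the 5 millisecond hold time is an arbitrary illustrative value, and a negative maxParallelism is treated as unlimited (see canNotify() and obtainQueue()).

    import ca.spottedleaf.concurrentutil.executor.thread.PrioritisedThreadPool;
    import ca.spottedleaf.concurrentutil.util.Priority;
    import java.util.concurrent.TimeUnit;

    public final class PoolUsageExample {
        public static void main(final String[] args) {
            // the thread modifier is applied to every worker thread before it is started
            final PrioritisedThreadPool pool = new PrioritisedThreadPool((final Thread thread) -> thread.setName("prioritised-worker"));
            pool.adjustThreadCount(2);

            final PrioritisedThreadPool.ExecutorGroup group = pool.createExecutorGroup(0, 0);
            // maxParallelism < 0 means unlimited; queueMaxHoldTime is in nanoseconds
            final PrioritisedThreadPool.ExecutorGroup.ThreadPoolExecutor executor =
                    group.createExecutor(-1, TimeUnit.MILLISECONDS.toNanos(5L), 0);

            executor.queueTask(() -> System.out.println("task executed"), Priority.NORMAL);

            // wait for queued tasks to drain, then exit
            pool.shutdown(true);
        }
    }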
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..94bfd7c56ffcea7d6491e94a7804bc3bd60fe9c3
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLong1Function.java
|
|
@@ -0,0 +1,8 @@
|
|
+package ca.spottedleaf.concurrentutil.function;
|
|
+
|
|
+@FunctionalInterface
|
|
+public interface BiLong1Function<T, R> {
|
|
+
|
|
+ public R apply(final long t1, final T t2);
|
|
+
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..8e7eef07960a18d0593688eba55adfa1c85efadf
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/function/BiLongObjectConsumer.java
|
|
@@ -0,0 +1,8 @@
|
|
+package ca.spottedleaf.concurrentutil.function;
|
|
+
|
|
+@FunctionalInterface
|
|
+public interface BiLongObjectConsumer<V> {
|
|
+
|
|
+ public void accept(final long key, final V value);
|
|
+
|
|
+}
|
|
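Both functional interfaces above take a primitive long as their first argument so callers avoid boxing keys; BiLong1Function is the shape used by the compute-style methods of the hash table added below. Illustrative lambdas:

    // maps a (long key, object) pair to a result
    BiLong1Function<String, Integer> combine = (key, value) -> value.length() + (int) key;
    // consumes a (long key, object) pair
    BiLongObjectConsumer<String> printer = (key, value) -> System.out.println(key + " -> " + value);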
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..7ffe4379b06c03c56abbcbdee3bb720894a10702
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/ReentrantAreaLock.java
|
|
@@ -0,0 +1,350 @@
|
|
+package ca.spottedleaf.concurrentutil.lock;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
|
|
+import ca.spottedleaf.concurrentutil.map.ConcurrentLong2ReferenceChainedHashTable;
|
|
+import ca.spottedleaf.concurrentutil.util.IntPairUtil;
|
|
+import java.util.Objects;
|
|
+import java.util.concurrent.locks.LockSupport;
|
|
+
|
|
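+/**
+ * Lock over a rectangular area of sections. Input coordinates are mapped to sections by right-shifting them by
+ * {@code coordinateShift}; a lock call acquires every section intersecting the requested rectangle and blocks
+ * while any other thread holds an intersecting section. The returned {@link Node} records the acquired sections
+ * and must be passed to {@link #unlock(Node)} to release them.
+ */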
+public final class ReentrantAreaLock {
|
|
+
|
|
+ public final int coordinateShift;
|
|
+
|
|
+ // aggressive load factor to reduce contention
|
|
+ private final ConcurrentLong2ReferenceChainedHashTable<Node> nodes = ConcurrentLong2ReferenceChainedHashTable.createWithCapacity(128, 0.2f);
|
|
+
|
|
+ public ReentrantAreaLock(final int coordinateShift) {
|
|
+ this.coordinateShift = coordinateShift;
|
|
+ }
|
|
+
|
|
+ public boolean isHeldByCurrentThread(final int x, final int z) {
|
|
+ final Thread currThread = Thread.currentThread();
|
|
+ final int shift = this.coordinateShift;
|
|
+ final int sectionX = x >> shift;
|
|
+ final int sectionZ = z >> shift;
|
|
+
|
|
+ final long coordinate = IntPairUtil.key(sectionX, sectionZ);
|
|
+ final Node node = this.nodes.get(coordinate);
|
|
+
|
|
+ return node != null && node.thread == currThread;
|
|
+ }
|
|
+
|
|
+ public boolean isHeldByCurrentThread(final int centerX, final int centerZ, final int radius) {
|
|
+ return this.isHeldByCurrentThread(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
|
|
+ }
|
|
+
|
|
+ public boolean isHeldByCurrentThread(final int fromX, final int fromZ, final int toX, final int toZ) {
|
|
+ if (fromX > toX || fromZ > toZ) {
|
|
+ throw new IllegalArgumentException();
|
|
+ }
|
|
+
|
|
+ final Thread currThread = Thread.currentThread();
|
|
+ final int shift = this.coordinateShift;
|
|
+ final int fromSectionX = fromX >> shift;
|
|
+ final int fromSectionZ = fromZ >> shift;
|
|
+ final int toSectionX = toX >> shift;
|
|
+ final int toSectionZ = toZ >> shift;
|
|
+
|
|
+ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
|
|
+ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
|
|
+ final long coordinate = IntPairUtil.key(currX, currZ);
|
|
+
|
|
+ final Node node = this.nodes.get(coordinate);
|
|
+
|
|
+ if (node == null || node.thread != currThread) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ public Node tryLock(final int x, final int z) {
|
|
+ return this.tryLock(x, z, x, z);
|
|
+ }
|
|
+
|
|
+ public Node tryLock(final int centerX, final int centerZ, final int radius) {
|
|
+ return this.tryLock(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
|
|
+ }
|
|
+
|
|
+ public Node tryLock(final int fromX, final int fromZ, final int toX, final int toZ) {
|
|
+ if (fromX > toX || fromZ > toZ) {
|
|
+ throw new IllegalArgumentException();
|
|
+ }
|
|
+
|
|
+ final Thread currThread = Thread.currentThread();
|
|
+ final int shift = this.coordinateShift;
|
|
+ final int fromSectionX = fromX >> shift;
|
|
+ final int fromSectionZ = fromZ >> shift;
|
|
+ final int toSectionX = toX >> shift;
|
|
+ final int toSectionZ = toZ >> shift;
|
|
+
|
|
+ final long[] areaAffected = new long[(toSectionX - fromSectionX + 1) * (toSectionZ - fromSectionZ + 1)];
|
|
+ int areaAffectedLen = 0;
|
|
+
|
|
+ final Node ret = new Node(this, areaAffected, currThread);
|
|
+
|
|
+ boolean failed = false;
|
|
+
|
|
+ // try to fast acquire area
|
|
+ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
|
|
+ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
|
|
+ final long coordinate = IntPairUtil.key(currX, currZ);
|
|
+
|
|
+ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
|
|
+
|
|
+ if (prev == null) {
|
|
+ areaAffected[areaAffectedLen++] = coordinate;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (prev.thread != currThread) {
|
|
+ failed = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!failed) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ // failed, undo logic
|
|
+ if (areaAffectedLen != 0) {
|
|
+ for (int i = 0; i < areaAffectedLen; ++i) {
|
|
+ final long key = areaAffected[i];
|
|
+
|
|
+ if (this.nodes.remove(key) != ret) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ areaAffectedLen = 0;
|
|
+
|
|
+ // since we inserted, we need to drain waiters
|
|
+ Thread unpark;
|
|
+ while ((unpark = ret.pollOrBlockAdds()) != null) {
|
|
+ LockSupport.unpark(unpark);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ public Node lock(final int x, final int z) {
|
|
+ final Thread currThread = Thread.currentThread();
|
|
+ final int shift = this.coordinateShift;
|
|
+ final int sectionX = x >> shift;
|
|
+ final int sectionZ = z >> shift;
|
|
+
|
|
+ final long coordinate = IntPairUtil.key(sectionX, sectionZ);
|
|
+ final long[] areaAffected = new long[1];
|
|
+ areaAffected[0] = coordinate;
|
|
+
|
|
+ final Node ret = new Node(this, areaAffected, currThread);
|
|
+
|
|
+ for (long failures = 0L;;) {
|
|
+ final Node park;
|
|
+
|
|
+ // try to fast acquire area
|
|
+ {
|
|
+ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
|
|
+
|
|
+ if (prev == null) {
|
|
+ ret.areaAffectedLen = 1;
|
|
+ return ret;
|
|
+ } else if (prev.thread != currThread) {
|
|
+ park = prev;
|
|
+ } else {
|
|
+ // only one node we would want to acquire, and it's owned by this thread already
|
|
+ // areaAffectedLen = 0 already
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ++failures;
|
|
+
|
|
+ if (failures > 128L && park.add(currThread)) {
|
|
+ LockSupport.park();
|
|
+ } else {
|
|
+ // high contention, spin wait
|
|
+ if (failures < 128L) {
|
|
+ for (long i = 0; i < failures; ++i) {
|
|
+ Thread.onSpinWait();
|
|
+ }
|
|
+ failures = failures << 1;
|
|
+ } else if (failures < 1_200L) {
|
|
+ LockSupport.parkNanos(1_000L);
|
|
+ failures = failures + 1L;
|
|
+ } else { // scale 0.1ms (100us) per failure
|
|
+ Thread.yield();
|
|
+ LockSupport.parkNanos(100_000L * failures);
|
|
+ failures = failures + 1L;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Node lock(final int centerX, final int centerZ, final int radius) {
|
|
+ return this.lock(centerX - radius, centerZ - radius, centerX + radius, centerZ + radius);
|
|
+ }
|
|
+
|
|
+ public Node lock(final int fromX, final int fromZ, final int toX, final int toZ) {
|
|
+ if (fromX > toX || fromZ > toZ) {
|
|
+ throw new IllegalArgumentException();
|
|
+ }
|
|
+
|
|
+ final Thread currThread = Thread.currentThread();
|
|
+ final int shift = this.coordinateShift;
|
|
+ final int fromSectionX = fromX >> shift;
|
|
+ final int fromSectionZ = fromZ >> shift;
|
|
+ final int toSectionX = toX >> shift;
|
|
+ final int toSectionZ = toZ >> shift;
|
|
+
|
|
+ if (((fromSectionX ^ toSectionX) | (fromSectionZ ^ toSectionZ)) == 0) {
|
|
+ return this.lock(fromX, fromZ);
|
|
+ }
|
|
+
|
|
+ final long[] areaAffected = new long[(toSectionX - fromSectionX + 1) * (toSectionZ - fromSectionZ + 1)];
|
|
+ int areaAffectedLen = 0;
|
|
+
|
|
+ final Node ret = new Node(this, areaAffected, currThread);
|
|
+
|
|
+ for (long failures = 0L;;) {
|
|
+ Node park = null;
|
|
+ boolean addedToArea = false;
|
|
+ boolean alreadyOwned = false;
|
|
+ boolean allOwned = true;
|
|
+
|
|
+ // try to fast acquire area
|
|
+ for (int currZ = fromSectionZ; currZ <= toSectionZ; ++currZ) {
|
|
+ for (int currX = fromSectionX; currX <= toSectionX; ++currX) {
|
|
+ final long coordinate = IntPairUtil.key(currX, currZ);
|
|
+
|
|
+ final Node prev = this.nodes.putIfAbsent(coordinate, ret);
|
|
+
|
|
+ if (prev == null) {
|
|
+ addedToArea = true;
|
|
+ allOwned = false;
|
|
+ areaAffected[areaAffectedLen++] = coordinate;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (prev.thread != currThread) {
|
|
+ park = prev;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ // this section is already owned by the current thread
|
|
+ alreadyOwned = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // check for failure
|
|
+ if ((park != null && addedToArea) || (park == null && alreadyOwned && !allOwned)) {
|
|
+ // failure to acquire: added and we need to block, or improper lock usage
|
|
+ for (int i = 0; i < areaAffectedLen; ++i) {
|
|
+ final long key = areaAffected[i];
|
|
+
|
|
+ if (this.nodes.remove(key) != ret) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ areaAffectedLen = 0;
|
|
+
|
|
+ // since we inserted, we need to drain waiters
|
|
+ Thread unpark;
|
|
+ while ((unpark = ret.pollOrBlockAdds()) != null) {
|
|
+ LockSupport.unpark(unpark);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (park == null) {
|
|
+ if (alreadyOwned && !allOwned) {
|
|
+ throw new IllegalStateException("Improper lock usage: Should never acquire intersecting areas");
|
|
+ }
|
|
+ ret.areaAffectedLen = areaAffectedLen;
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ // failed
|
|
+
|
|
+ ++failures;
|
|
+
|
|
+ if (failures > 128L && park.add(currThread)) {
|
|
+ LockSupport.park(park);
|
|
+ } else {
|
|
+ // high contention, spin wait
|
|
+ if (failures < 128L) {
|
|
+ for (long i = 0; i < failures; ++i) {
|
|
+ Thread.onSpinWait();
|
|
+ }
|
|
+ failures = failures << 1;
|
|
+ } else if (failures < 1_200L) {
|
|
+ LockSupport.parkNanos(1_000L);
|
|
+ failures = failures + 1L;
|
|
+ } else { // scale 0.1ms (100us) per failure
|
|
+ Thread.yield();
|
|
+ LockSupport.parkNanos(100_000L * failures);
|
|
+ failures = failures + 1L;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (addedToArea) {
|
|
+ // try again, so we need to allow adds so that other threads can properly block on us
|
|
+ ret.allowAdds();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void unlock(final Node node) {
|
|
+ if (node.lock != this) {
|
|
+ throw new IllegalStateException("Unlock target lock mismatch");
|
|
+ }
|
|
+
|
|
+ final long[] areaAffected = node.areaAffected;
|
|
+ final int areaAffectedLen = node.areaAffectedLen;
|
|
+
|
|
+ if (areaAffectedLen == 0) {
|
|
+ // here we are not in the node map, and so do not need to remove from the node map or unblock any waiters
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ Objects.checkFromToIndex(0, areaAffectedLen, areaAffected.length);
|
|
+
|
|
+ // remove from node map; allowing other threads to lock
|
|
+ for (int i = 0; i < areaAffectedLen; ++i) {
|
|
+ final long coordinate = areaAffected[i];
|
|
+ if (this.nodes.remove(coordinate, node) != node) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ Thread unpark;
|
|
+ while ((unpark = node.pollOrBlockAdds()) != null) {
|
|
+ LockSupport.unpark(unpark);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class Node extends MultiThreadedQueue<Thread> {
|
|
+
|
|
+ private final ReentrantAreaLock lock;
|
|
+ private final long[] areaAffected;
|
|
+ private int areaAffectedLen;
|
|
+ private final Thread thread;
|
|
+
|
|
+ private Node(final ReentrantAreaLock lock, final long[] areaAffected, final Thread thread) {
|
|
+ this.lock = lock;
|
|
+ this.areaAffected = areaAffected;
|
|
+ this.thread = thread;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ return "Node{" +
|
|
+ "areaAffected=" + IntPairUtil.toString(this.areaAffected, 0, this.areaAffectedLen) +
|
|
+ ", thread=" + this.thread +
|
|
+ '}';
|
|
+ }
|
|
+ }
|
|
+}
|
|
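A minimal sketch of how the area lock above might be used; the coordinate shift of 4 is an arbitrary illustrative choice (each section then spans 16 coordinates per axis).

    import ca.spottedleaf.concurrentutil.lock.ReentrantAreaLock;

    public final class AreaLockExample {
        private static final ReentrantAreaLock AREA_LOCK = new ReentrantAreaLock(4);

        public static void doWork(final int posX, final int posZ) {
            // acquire every section intersecting the square of radius 1 around (posX, posZ);
            // blocks while another thread holds an intersecting section
            final ReentrantAreaLock.Node node = AREA_LOCK.lock(posX, posZ, 1);
            try {
                // exclusive access to the locked area here
            } finally {
                AREA_LOCK.unlock(node);
            }
        }
    }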
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..6918f130099e6c19e20a47bfdb54915cdd13732a
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/map/ConcurrentLong2ReferenceChainedHashTable.java
|
|
@@ -0,0 +1,1704 @@
|
|
+package ca.spottedleaf.concurrentutil.map;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.function.BiLong1Function;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.HashUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.IntegerUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.ThrowUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Validate;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.Arrays;
|
|
+import java.util.Iterator;
|
|
+import java.util.NoSuchElementException;
|
|
+import java.util.PrimitiveIterator;
|
|
+import java.util.concurrent.atomic.LongAdder;
|
|
+import java.util.function.BiFunction;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.Function;
|
|
+import java.util.function.LongConsumer;
|
|
+import java.util.function.LongFunction;
|
|
+import java.util.function.Predicate;
|
|
+
|
|
+/**
|
|
+ * Concurrent hashtable implementation supporting mapping arbitrary {@code long} values onto non-null {@code Object}
|
|
+ * values with support for multiple writer and multiple reader threads.
|
|
+ *
|
|
+ * <p><h3>Happens-before relationship</h3></p>
|
|
+ * <p>
|
|
+ * As with {@link java.util.concurrent.ConcurrentMap}, there is a happens-before relationship between actions in one thread
|
|
+ * prior to writing to the map and access to the results of those actions in another thread.
|
|
+ * </p>
|
|
+ *
|
|
+ * <p><h3>Atomicity of functional methods</h3></p>
|
|
+ * <p>
|
|
+ * Functional methods are functions declared in this class which possibly perform a write (remove, replace, or modify)
|
|
+ * to an entry in this map as a result of invoking a function on an input parameter. For example, {@link #compute(long, BiLong1Function)},
|
|
+ * {@link #merge(long, Object, BiFunction)} and {@link #removeIf(long, Predicate)} are examples of functional methods.
|
|
+ * Functional methods will be performed atomically, that is, the input parameter is guaranteed to only be invoked at most
|
|
+ * once per function call. The consequence of this behavior, however, is that a critical lock for a bin entry is held, which
|
|
+ * means that if the input parameter invocation makes additional calls to write into this hash table, the result
|
|
+ * is undefined and deadlock-prone.
|
|
+ * </p>
|
|
+ *
|
|
+ * @param <V>
|
|
+ * @see java.util.concurrent.ConcurrentMap
|
|
+ */
|
|
+public class ConcurrentLong2ReferenceChainedHashTable<V> implements Iterable<ConcurrentLong2ReferenceChainedHashTable.TableEntry<V>> {
|
|
+
|
|
+ protected static final int DEFAULT_CAPACITY = 16;
|
|
+ protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
|
|
+ protected static final int MAXIMUM_CAPACITY = Integer.MIN_VALUE >>> 1;
|
|
+
|
|
+ protected final LongAdder size = new LongAdder();
|
|
+ protected final float loadFactor;
|
|
+
|
|
+ protected volatile TableEntry<V>[] table;
|
|
+
|
|
+ protected static final int THRESHOLD_NO_RESIZE = -1;
|
|
+ protected static final int THRESHOLD_RESIZING = -2;
|
|
+ protected volatile int threshold;
|
|
+ protected static final VarHandle THRESHOLD_HANDLE = ConcurrentUtil.getVarHandle(ConcurrentLong2ReferenceChainedHashTable.class, "threshold", int.class);
|
|
+
|
|
+ protected final int getThresholdAcquire() {
|
|
+ return (int)THRESHOLD_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final int getThresholdVolatile() {
|
|
+ return (int)THRESHOLD_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ protected final void setThresholdPlain(final int threshold) {
|
|
+ THRESHOLD_HANDLE.set(this, threshold);
|
|
+ }
|
|
+
|
|
+ protected final void setThresholdRelease(final int threshold) {
|
|
+ THRESHOLD_HANDLE.setRelease(this, threshold);
|
|
+ }
|
|
+
|
|
+ protected final void setThresholdVolatile(final int threshold) {
|
|
+ THRESHOLD_HANDLE.setVolatile(this, threshold);
|
|
+ }
|
|
+
|
|
+ protected final int compareExchangeThresholdVolatile(final int expect, final int update) {
|
|
+ return (int)THRESHOLD_HANDLE.compareAndExchange(this, expect, update);
|
|
+ }
|
|
+
|
|
+ public ConcurrentLong2ReferenceChainedHashTable() {
|
|
+ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ protected static int getTargetThreshold(final int capacity, final float loadFactor) {
|
|
+ final double ret = (double)capacity * (double)loadFactor;
|
|
+ if (Double.isInfinite(ret) || ret >= ((double)Integer.MAX_VALUE)) {
|
|
+ return THRESHOLD_NO_RESIZE;
|
|
+ }
|
|
+
|
|
+ return (int)Math.ceil(ret);
|
|
+ }
|
|
+
|
|
+ protected static int getCapacityFor(final int capacity) {
|
|
+ if (capacity <= 0) {
|
|
+ throw new IllegalArgumentException("Invalid capacity: " + capacity);
|
|
+ }
|
|
+ if (capacity >= MAXIMUM_CAPACITY) {
|
|
+ return MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ return IntegerUtil.roundCeilLog2(capacity);
|
|
+ }
|
|
+
|
|
+ protected ConcurrentLong2ReferenceChainedHashTable(final int capacity, final float loadFactor) {
|
|
+ final int tableSize = getCapacityFor(capacity);
|
|
+
|
|
+ if (loadFactor <= 0.0 || !Float.isFinite(loadFactor)) {
|
|
+ throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
|
|
+ }
|
|
+
|
|
+ if (tableSize == MAXIMUM_CAPACITY) {
|
|
+ this.setThresholdPlain(THRESHOLD_NO_RESIZE);
|
|
+ } else {
|
|
+ this.setThresholdPlain(getTargetThreshold(tableSize, loadFactor));
|
|
+ }
|
|
+
|
|
+ this.loadFactor = loadFactor;
|
|
+ // noinspection unchecked
|
|
+ this.table = (TableEntry<V>[])new TableEntry[tableSize];
|
|
+ }
|
|
+
|
|
+ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithCapacity(final int capacity) {
|
|
+ return createWithCapacity(capacity, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithCapacity(final int capacity, final float loadFactor) {
|
|
+ return new ConcurrentLong2ReferenceChainedHashTable<>(capacity, loadFactor);
|
|
+ }
|
|
+
|
|
+ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithExpected(final int expected) {
|
|
+ return createWithExpected(expected, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ public static <V> ConcurrentLong2ReferenceChainedHashTable<V> createWithExpected(final int expected, final float loadFactor) {
|
|
+ final int capacity = (int)Math.ceil((double)expected / (double)loadFactor);
|
|
+
|
|
+ return createWithCapacity(capacity, loadFactor);
|
|
+ }
|
|
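+ /*
+ * Usage sketch (illustrative): keys are primitive longs, values must be non-null.
+ *
+ * ConcurrentLong2ReferenceChainedHashTable<String> map = ConcurrentLong2ReferenceChainedHashTable.createWithExpected(1024);
+ * map.put(5L, "value");
+ * String prev = map.putIfAbsent(5L, "other"); // returns "value"; the existing mapping is kept
+ * String current = map.get(5L); // "value"
+ */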
+
|
|
+ /** must be deterministic given a key */
|
|
+ protected static int getHash(final long key) {
|
|
+ return (int)HashUtil.mix(key);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns the load factor associated with this map.
|
|
+ */
|
|
+ public final float getLoadFactor() {
|
|
+ return this.loadFactor;
|
|
+ }
|
|
+
|
|
+ protected static <V> TableEntry<V> getAtIndexVolatile(final TableEntry<V>[] table, final int index) {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getVolatile(table, index);
|
|
+ }
|
|
+
|
|
+ protected static <V> void setAtIndexRelease(final TableEntry<V>[] table, final int index, final TableEntry<V> value) {
|
|
+ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
|
|
+ }
|
|
+
|
|
+ protected static <V> void setAtIndexVolatile(final TableEntry<V>[] table, final int index, final TableEntry<V> value) {
|
|
+ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setVolatile(table, index, value);
|
|
+ }
|
|
+
|
|
+ protected static <V> TableEntry<V> compareAndExchangeAtIndexVolatile(final TableEntry<V>[] table, final int index,
|
|
+ final TableEntry<V> expect, final TableEntry<V> update) {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.compareAndExchange(table, index, expect, update);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns the possible node associated with the key, or {@code null} if there is no such node. The node
|
|
+ * returned may have a {@code null} {@link TableEntry#value}, in which case the node is a placeholder for
|
|
+ * a compute/computeIfAbsent call. The placeholder node should not be considered mapped in order to preserve
|
|
+ * happens-before relationships between writes and reads in the map.
|
|
+ */
|
|
+ protected final TableEntry<V> getNode(final long key) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ for (;;) {
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, hash & (table.length - 1));
|
|
+
|
|
+ if (node == null) {
|
|
+ // node == null
|
|
+ return node;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ for (; node != null; node = node.getNextVolatile()) {
|
|
+ if (node.key == key) {
|
|
+ return node;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // node == null
|
|
+ return node;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns the currently mapped value associated with the specified key, or {@code null} if there is none.
|
|
+ *
|
|
+ * @param key Specified key
|
|
+ */
|
|
+ public V get(final long key) {
|
|
+ final TableEntry<V> node = this.getNode(key);
|
|
+ return node == null ? null : node.getValueVolatile();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns the currently mapped value associated with the specified key, or the specified default value if there is none.
|
|
+ *
|
|
+ * @param key Specified key
|
|
+ * @param defaultValue Specified default value
|
|
+ */
|
|
+ public V getOrDefault(final long key, final V defaultValue) {
|
|
+ final TableEntry<V> node = this.getNode(key);
|
|
+ if (node == null) {
|
|
+ return defaultValue;
|
|
+ }
|
|
+
|
|
+ final V ret = node.getValueVolatile();
|
|
+ if (ret == null) {
|
|
+ // ret == null for nodes pre-allocated to compute() and friends
|
|
+ return defaultValue;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns whether the specified key is mapped to some value.
|
|
+ * @param key Specified key
|
|
+ */
|
|
+ public boolean containsKey(final long key) {
|
|
+ // cannot use getNode, as the node may be a placeholder for compute()
|
|
+ return this.get(key) != null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns whether the specified value has a key mapped to it.
|
|
+ * @param value Specified value
|
|
+ * @throws NullPointerException If value is null
|
|
+ */
|
|
+ public boolean containsValue(final V value) {
|
|
+ Validate.notNull(value, "Value cannot be null");
|
|
+
|
|
+ final NodeIterator<V> iterator = new NodeIterator<>(this.table);
|
|
+
|
|
+ TableEntry<V> node;
|
|
+ while ((node = iterator.findNext()) != null) {
|
|
+ // need to use acquire here to ensure the happens-before relationship
|
|
+ if (node.getValueAcquire() == value) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns the number of mappings in this map.
|
|
+ */
|
|
+ public int size() {
|
|
+ final long ret = this.size.sum();
|
|
+
|
|
+ if (ret < 0L) {
|
|
+ return 0;
|
|
+ }
|
|
+ if (ret > (long)Integer.MAX_VALUE) {
|
|
+ return Integer.MAX_VALUE;
|
|
+ }
|
|
+
|
|
+ return (int)ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns whether this map has no mappings.
|
|
+ */
|
|
+ public boolean isEmpty() {
|
|
+ return this.size.sum() <= 0L;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Adds count to size and checks threshold for resizing
|
|
+ */
|
|
+ protected final void addSize(final long count) {
|
|
+ this.size.add(count);
|
|
+
|
|
+ final int threshold = this.getThresholdAcquire();
|
|
+
|
|
+ if (threshold < 0L) {
|
|
+ // resizing or no resizing allowed, in either case we do not need to do anything
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final long sum = this.size.sum();
|
|
+
|
|
+ if (sum < (long)threshold) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (threshold != this.compareExchangeThresholdVolatile(threshold, THRESHOLD_RESIZING)) {
|
|
+ // some other thread resized
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ // create new table
|
|
+ this.resize(sum);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Resizes the table; only invoke from the thread which has successfully updated the threshold to {@link #THRESHOLD_RESIZING}
|
|
+ * @param sum Estimate of current mapping count, must be >= old threshold
|
|
+ */
|
|
+ private void resize(final long sum) {
|
|
+ int capacity;
|
|
+
|
|
+ // add 1.0, as sum may equal threshold (in which case, sum / loadFactor = current capacity)
|
|
+ // adding 1.0 should at least raise the size by a factor of two due to usage of roundCeilLog2
|
|
+ final double targetD = ((double)sum / (double)this.loadFactor) + 1.0;
|
|
+ if (targetD >= (double)MAXIMUM_CAPACITY) {
|
|
+ capacity = MAXIMUM_CAPACITY;
|
|
+ } else {
|
|
+ capacity = (int)Math.ceil(targetD);
|
|
+ capacity = IntegerUtil.roundCeilLog2(capacity);
|
|
+ if (capacity > MAXIMUM_CAPACITY) {
|
|
+ capacity = MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // create new table data
|
|
+
|
|
+ // noinspection unchecked
|
|
+ final TableEntry<V>[] newTable = new TableEntry[capacity];
|
|
+ // noinspection unchecked
|
|
+ final TableEntry<V> resizeNode = new TableEntry<>(0L, (V)newTable, true);
|
|
+
|
|
+ // transfer nodes from old table
|
|
+
|
|
+ // does not need to be volatile read, just plain
|
|
+ final TableEntry<V>[] oldTable = this.table;
|
|
+
|
|
+ // when resizing, the old entries at bin i (where i = hash % oldTable.length) are assigned to
|
|
+ // bin k in the new table (where k = hash % newTable.length)
|
|
+ // since both table lengths are powers of two (specifically, newTable is a multiple of oldTable),
|
|
+ // the possible number of locations in the new table to assign any given i is newTable.length/oldTable.length
|
|
+
|
|
+ // we can build the new linked nodes for the new table by using a work array sized to newTable.length/oldTable.length
|
|
+ // which holds the _last_ entry in the chain per bin
|
|
+
|
|
+ final int capOldShift = IntegerUtil.floorLog2(oldTable.length);
|
|
+ final int capDiffShift = IntegerUtil.floorLog2(capacity) - capOldShift;
|
|
+
|
|
+ if (capDiffShift == 0) {
|
|
+ throw new IllegalStateException("Resizing to same size");
|
|
+ }
|
|
+
|
|
+ // noinspection unchecked
|
|
+ final TableEntry<V>[] work = new TableEntry[1 << capDiffShift]; // typically, capDiffShift = 1
|
|
+
|
|
+ for (int i = 0, len = oldTable.length; i < len; ++i) {
|
|
+ TableEntry<V> binNode = getAtIndexVolatile(oldTable, i);
|
|
+
|
|
+ for (;;) {
|
|
+ if (binNode == null) {
|
|
+ // just need to replace the bin node, do not need to move anything
|
|
+ if (null == (binNode = compareAndExchangeAtIndexVolatile(oldTable, i, null, resizeNode))) {
|
|
+ break;
|
|
+ } // else: binNode != null, fall through
|
|
+ }
|
|
+
|
|
+ // need write lock to block other writers
|
|
+ synchronized (binNode) {
|
|
+ if (binNode != (binNode = getAtIndexVolatile(oldTable, i))) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ // an important detail of resizing is that we do not need to be concerned with synchronisation on
|
|
+ // writes to the new table, as no access to any nodes on bin i on oldTable will occur until a thread
|
|
+ // sees the resizeNode
|
|
+ // specifically, as long as the resizeNode is release written there are no cases where another thread
|
|
+ // will see our writes to the new table
|
|
+
|
|
+ TableEntry<V> next = binNode.getNextPlain();
|
|
+
|
|
+ if (next == null) {
|
|
+ // simple case: do not use work array
|
|
+
|
|
+ // do not need to create new node, readers only need to see the state of the map at the
|
|
+ // beginning of a call, so any additions onto _next_ don't really matter
|
|
+ // additionally, the old node is replaced so that writers automatically forward to the new table,
|
|
+ // which resolves any issues
|
|
+ newTable[getHash(binNode.key) & (capacity - 1)] = binNode;
|
|
+ } else {
|
|
+ // reset for next usage
|
|
+ Arrays.fill(work, null);
|
|
+
|
|
+ for (TableEntry<V> curr = binNode; curr != null; curr = curr.getNextPlain()) {
|
|
+ final int newTableIdx = getHash(curr.key) & (capacity - 1);
|
|
+ final int workIdx = newTableIdx >>> capOldShift;
|
|
+
|
|
+ final TableEntry<V> replace = new TableEntry<>(curr.key, curr.getValuePlain());
|
|
+
|
|
+ final TableEntry<V> workNode = work[workIdx];
|
|
+ work[workIdx] = replace;
|
|
+
|
|
+ if (workNode == null) {
|
|
+ newTable[newTableIdx] = replace;
|
|
+ continue;
|
|
+ } else {
|
|
+ workNode.setNextPlain(replace);
|
|
+ continue;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ setAtIndexRelease(oldTable, i, resizeNode);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // calculate new threshold
|
|
+ final int newThreshold;
|
|
+ if (capacity == MAXIMUM_CAPACITY) {
|
|
+ newThreshold = THRESHOLD_NO_RESIZE;
|
|
+ } else {
|
|
+ newThreshold = getTargetThreshold(capacity, loadFactor);
|
|
+ }
|
|
+
|
|
+ this.table = newTable;
|
|
+ // finish resize operation by releasing hold on threshold
|
|
+ this.setThresholdVolatile(newThreshold);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Subtracts count from size
|
|
+ */
|
|
+ protected final void subSize(final long count) {
|
|
+ this.size.add(-count);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Atomically updates the value associated with {@code key} to {@code value}, or inserts a new mapping with {@code key}
|
|
+ * mapped to {@code value}.
|
|
+ * @param key Specified key
|
|
+ * @param value Specified value
|
|
+ * @throws NullPointerException If value is null
|
|
+ * @return Old value previously associated with key, or {@code null} if none.
|
|
+ */
|
|
+ public V put(final long key, final V value) {
|
|
+ Validate.notNull(value, "Value may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, value)))) {
|
|
+ // successfully inserted
|
|
+ this.addSize(1L);
|
|
+ return null;
|
|
+ } // else: node != null, fall through
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V ret = node.getValuePlain();
|
|
+ node.setValueVolatile(value);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ prev.setNextRelease(new TableEntry<>(key, value));
|
|
+ }
|
|
+
|
|
+ this.addSize(1L);
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Atomically inserts a new mapping with {@code key} mapped to {@code value} if and only if {@code key} is not
|
|
+ * currently mapped to some value.
|
|
+ * @param key Specified key
|
|
+ * @param value Specified value
|
|
+ * @throws NullPointerException If value is null
|
|
+ * @return Value currently associated with key, or {@code null} if none and {@code value} was associated.
|
|
+ */
|
|
+ public V putIfAbsent(final long key, final V value) {
|
|
+ Validate.notNull(value, "Value may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, value)))) {
|
|
+ // successfully inserted
|
|
+ this.addSize(1L);
|
|
+ return null;
|
|
+ } // else: node != null, fall through
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ // optimise ifAbsent calls: check if first node is key before attempting lock acquire
|
|
+ if (node.key == key) {
|
|
+ final V ret = node.getValueVolatile();
|
|
+ if (ret != null) {
|
|
+ return ret;
|
|
+ } // else: fall back to lock to read the node
|
|
+ }
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ return node.getValuePlain();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ prev.setNextRelease(new TableEntry<>(key, value));
|
|
+ }
|
|
+
|
|
+ this.addSize(1L);
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
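+ /*
+ * Usage sketch for putIfAbsent(): only the caller that observes a null return owns the insert; every other
+ * racing caller receives the value that won. Names below are hypothetical.
+ *
+ *   final ChunkData candidate = new ChunkData(chunkKey);
+ *   final ChunkData existing = map.putIfAbsent(chunkKey, candidate);
+ *   final ChunkData inUse = existing == null ? candidate : existing; // the mapping everyone agrees on
+ */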
+
|
|
+ /**
|
|
+ * Atomically updates the value associated with {@code key} to {@code value}, or does nothing if {@code key} is not
|
|
+ * associated with a value.
|
|
+ * @param key Specified key
|
|
+ * @param value Specified value
|
|
+ * @throws NullPointerException If value is null
|
|
+ * @return Old value previously associated with key, or {@code null} if none.
|
|
+ */
|
|
+ public V replace(final long key, final V value) {
|
|
+ Validate.notNull(value, "Value may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ for (; node != null; node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V ret = node.getValuePlain();
|
|
+ node.setValueVolatile(value);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Atomically updates the value associated with {@code key} to {@code update} if the currently associated
|
|
+ * value is reference equal to {@code expect}, otherwise does nothing.
|
|
+ * @param key Specified key
|
|
+ * @param expect Expected value to check current mapped value with
|
|
+ * @param update Update value to replace mapped value with
|
|
+ * @throws NullPointerException If expect or update is null
|
|
+ * @return If the currently mapped value is not reference equal to {@code expect}, then returns the currently mapped
|
|
+ * value. If the key is not mapped to any value, then returns {@code null}. If neither of the two cases are
|
|
+ * true, then returns {@code expect}.
|
|
+ */
|
|
+ public V replace(final long key, final V expect, final V update) {
|
|
+ Validate.notNull(expect, "Expect may not be null");
|
|
+ Validate.notNull(update, "Update may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ for (; node != null; node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V ret = node.getValuePlain();
|
|
+
|
|
+ if (ret != expect) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ node.setValueVolatile(update);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
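+ /*
+ * Usage sketch for the two-argument replace(): a compare-and-swap style retry loop that derives the new value
+ * from the current one. Assumes this class's get(long) accessor (defined earlier in the file) and a hypothetical
+ * immutable "Counter" value type with an increment() method.
+ *
+ *   for (;;) {
+ *       final Counter current = map.get(key);
+ *       if (current == null) {
+ *           break; // nothing mapped, nothing to update
+ *       }
+ *       if (map.replace(key, current, current.increment()) == current) {
+ *           break; // the expected instance was still mapped, so the swap succeeded
+ *       }
+ *       // another thread swapped or removed the mapping; re-read and retry
+ *   }
+ */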
+
|
|
+ /**
|
|
+ * Atomically removes the mapping for the specified key and returns the value it was associated with. If the key
|
|
+ * is not mapped to a value, then does nothing and returns {@code null}.
|
|
+ * @param key Specified key
|
|
+ * @return Old value previously associated with key, or {@code null} if none.
|
|
+ */
|
|
+ public V remove(final long key) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+ V ret = null;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+
|
|
+ TableEntry<V> prev = null;
|
|
+
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ ret = node.getValuePlain();
|
|
+ removed = true;
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Atomically removes the mapping for the specified key if it is mapped to {@code expect} and returns {@code expect}. If the key
|
|
+ * is not mapped to a value, then does nothing and returns {@code null}. If the key is mapped to a value that is not reference
|
|
+ * equal to {@code expect}, then returns that value.
|
|
+ * @param key Specified key
|
|
+ * @param expect Specified expected value
|
|
+ * @return The specified expected value if the key was mapped to {@code expect}. If
|
|
+ * the key is not mapped to any value, then returns {@code null}. If neither of those cases are true,
|
|
+ * then returns the current (non-null) mapped value for key.
|
|
+ */
|
|
+ public V remove(final long key, final V expect) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+ V ret = null;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+
|
|
+ TableEntry<V> prev = null;
|
|
+
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ ret = node.getValuePlain();
|
|
+ if (ret == expect) {
|
|
+ removed = true;
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
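+ /*
+ * Usage sketch for remove(key, expect): remove only the exact instance this thread inserted earlier, leaving
+ * any replacement installed by another thread untouched. Names are hypothetical.
+ *
+ *   final ChunkData witnessed = map.remove(key, ours);
+ *   final boolean removedByUs = witnessed == ours; // false if absent or replaced by someone else
+ */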
+
|
|
+ /**
+ * Atomically removes the mapping for the specified key if the predicate returns true for its currently mapped value. If the key
+ * is not mapped to a value, then does nothing and returns {@code null}.
+ *
+ * <p>
+ * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
+ * </p>
+ *
+ * @param key Specified key
+ * @param predicate Specified predicate
+ * @throws NullPointerException If predicate is null
+ * @return The previously mapped value if the predicate returned true and the mapping was removed, the current
+ * (non-null) mapped value if the predicate returned false, or {@code null} if the key was not mapped to any value.
+ */
+ public V removeIf(final long key, final Predicate<? super V> predicate) {
|
|
+ Validate.notNull(predicate, "Predicate may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+ V ret = null;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+
|
|
+ TableEntry<V> prev = null;
|
|
+
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ ret = node.getValuePlain();
|
|
+ if (predicate.test(ret)) {
|
|
+ removed = true;
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
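+ /*
+ * Usage sketch for removeIf(): conditional removal decided by the current value while the bin lock is held.
+ * "ChunkData" and isUnloaded() are hypothetical.
+ *
+ *   final ChunkData seen = map.removeIf(key, (data) -> data.isUnloaded());
+ *   // seen is the value the predicate examined (removed or not), or null if the key was unmapped
+ */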
+
|
|
+ /**
|
|
+ * See {@link java.util.concurrent.ConcurrentMap#compute(Object, BiFunction)}
|
|
+ * <p>
|
|
+ * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
|
|
+ * </p>
|
|
+ */
|
|
+ public V compute(final long key, final BiLong1Function<? super V, ? extends V> function) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ V ret = null;
|
|
+ if (node == null) {
|
|
+ final TableEntry<V> insert = new TableEntry<>(key, null);
|
|
+
|
|
+ boolean added = false;
|
|
+
|
|
+ synchronized (insert) {
|
|
+ if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, insert))) {
|
|
+ try {
|
|
+ ret = function.apply(key, null);
|
|
+ } catch (final Throwable throwable) {
|
|
+ setAtIndexVolatile(table, index, null);
|
|
+ ThrowUtil.throwUnchecked(throwable);
|
|
+ // unreachable
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (ret == null) {
|
|
+ setAtIndexVolatile(table, index, null);
|
|
+ return ret;
|
|
+ } else {
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ insert.setValueRelease(ret);
|
|
+ added = true;
|
|
+ }
|
|
+ } // else: node != null, fall through
|
|
+ }
|
|
+
|
|
+ if (added) {
|
|
+ this.addSize(1L);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+ boolean added = false;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V old = node.getValuePlain();
|
|
+
|
|
+ final V computed = function.apply(key, old);
|
|
+
|
|
+ if (computed != null) {
|
|
+ node.setValueVolatile(computed);
|
|
+ return computed;
|
|
+ }
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+
|
|
+ removed = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!removed) {
|
|
+ final V computed = function.apply(key, null);
|
|
+ if (computed != null) {
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ prev.setNextRelease(new TableEntry<>(key, computed));
|
|
+ ret = computed;
|
|
+ added = true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+ if (added) {
|
|
+ this.addSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
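+ /*
+ * Usage sketch for compute(): one call that can insert, replace, or remove depending on what the function
+ * returns; returning null removes (or declines to create) the mapping. Names are hypothetical.
+ *
+ *   map.compute(key, (k, current) -> {
+ *       if (current == null) {
+ *           return new ChunkData(k); // no mapping yet: create one
+ *       }
+ *       return current.isExpired() ? null : current; // null unlinks the expired entry
+ *   });
+ */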
+
|
|
+ /**
|
|
+ * See {@link java.util.concurrent.ConcurrentMap#computeIfAbsent(Object, Function)}
|
|
+ * <p>
|
|
+ * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
|
|
+ * </p>
|
|
+ */
|
|
+ public V computeIfAbsent(final long key, final LongFunction<? extends V> function) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ V ret = null;
|
|
+ if (node == null) {
|
|
+ final TableEntry<V> insert = new TableEntry<>(key, null);
|
|
+
|
|
+ boolean added = false;
|
|
+
|
|
+ synchronized (insert) {
|
|
+ if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, insert))) {
|
|
+ try {
|
|
+ ret = function.apply(key);
|
|
+ } catch (final Throwable throwable) {
|
|
+ setAtIndexVolatile(table, index, null);
|
|
+ ThrowUtil.throwUnchecked(throwable);
|
|
+ // unreachable
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (ret == null) {
|
|
+ setAtIndexVolatile(table, index, null);
|
|
+ return null;
|
|
+ } else {
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ insert.setValueRelease(ret);
|
|
+ added = true;
|
|
+ }
|
|
+ } // else: node != null, fall through
|
|
+ }
|
|
+
|
|
+ if (added) {
|
|
+ this.addSize(1L);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ // optimise ifAbsent calls: check if first node is key before attempting lock acquire
|
|
+ if (node.key == key) {
|
|
+ ret = node.getValueVolatile();
|
|
+ if (ret != null) {
|
|
+ return ret;
|
|
+ } // else: fall back to lock to read the node
|
|
+ }
|
|
+
|
|
+ boolean added = false;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ ret = node.getValuePlain();
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ final V computed = function.apply(key);
|
|
+ if (computed != null) {
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ prev.setNextRelease(new TableEntry<>(key, computed));
|
|
+ ret = computed;
|
|
+ added = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (added) {
|
|
+ this.addSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
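+ /*
+ * Usage sketch for computeIfAbsent(): lazily create a value the first time a key is requested. While the
+ * factory runs, the bin is held by a placeholder entry whose value is null, which readers and iterators skip
+ * until the real value is published. loadChunkData() is a hypothetical factory.
+ *
+ *   final ChunkData data = map.computeIfAbsent(key, (k) -> loadChunkData(k));
+ */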
+
|
|
+ /**
|
|
+ * See {@link java.util.concurrent.ConcurrentMap#computeIfPresent(Object, BiFunction)}
|
|
+ * <p>
|
|
+ * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
|
|
+ * </p>
|
|
+ */
|
|
+ public V computeIfPresent(final long key, final BiLong1Function<? super V, ? extends V> function) {
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V old = node.getValuePlain();
|
|
+
|
|
+ final V computed = function.apply(key, old);
|
|
+
|
|
+ if (computed != null) {
|
|
+ node.setValueVolatile(computed);
|
|
+ return computed;
|
|
+ }
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+
|
|
+ removed = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * See {@link java.util.concurrent.ConcurrentMap#merge(Object, Object, BiFunction)}
|
|
+ * <p>
|
|
+ * This function is a "functional method" as defined by {@link ConcurrentLong2ReferenceChainedHashTable}.
|
|
+ * </p>
|
|
+ */
|
|
+ public V merge(final long key, final V def, final BiFunction<? super V, ? super V, ? extends V> function) {
|
|
+ Validate.notNull(def, "Default value may not be null");
|
|
+
|
|
+ final int hash = getHash(key);
|
|
+
|
|
+ TableEntry<V>[] table = this.table;
|
|
+ table_loop:
|
|
+ for (;;) {
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ TableEntry<V> node = getAtIndexVolatile(table, index);
|
|
+ node_loop:
|
|
+ for (;;) {
|
|
+ if (node == null) {
|
|
+ if (null == (node = compareAndExchangeAtIndexVolatile(table, index, null, new TableEntry<>(key, def)))) {
|
|
+ // successfully inserted
|
|
+ this.addSize(1L);
|
|
+ return def;
|
|
+ } // else: node != null, fall through
|
|
+ }
|
|
+
|
|
+ if (node.resize) {
|
|
+ // noinspection unchecked
|
|
+ table = (TableEntry<V>[])node.getValuePlain();
|
|
+ continue table_loop;
|
|
+ }
|
|
+
|
|
+ boolean removed = false;
|
|
+ boolean added = false;
|
|
+ V ret = null;
|
|
+
|
|
+ synchronized (node) {
|
|
+ if (node != (node = getAtIndexVolatile(table, index))) {
|
|
+ continue node_loop;
|
|
+ }
|
|
+ // plain reads are fine during synchronised access, as we are the only writer
|
|
+ TableEntry<V> prev = null;
|
|
+ for (; node != null; prev = node, node = node.getNextPlain()) {
|
|
+ if (node.key == key) {
|
|
+ final V old = node.getValuePlain();
|
|
+
|
|
+ final V computed = function.apply(old, def);
|
|
+
|
|
+ if (computed != null) {
|
|
+ node.setValueVolatile(computed);
|
|
+ return computed;
|
|
+ }
|
|
+
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, node.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(node.getNextPlain());
|
|
+ }
|
|
+
|
|
+ removed = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!removed) {
|
|
+ // volatile ordering ensured by addSize(), but we need release here
|
|
+ // to ensure proper ordering with reads and other writes
|
|
+ prev.setNextRelease(new TableEntry<>(key, def));
|
|
+ ret = def;
|
|
+ added = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (removed) {
|
|
+ this.subSize(1L);
|
|
+ }
|
|
+ if (added) {
|
|
+ this.addSize(1L);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+ }
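+ /*
+ * Usage sketch for merge(): insert the default when the key is unmapped, otherwise combine the current value
+ * with the default; a combiner returning null removes the mapping. Assumes a hypothetical immutable "Stats"
+ * value type with a combine() method.
+ *
+ *   map.merge(key, new Stats(1L), (current, incoming) -> current.combine(incoming));
+ */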
+
|
|
+ /**
|
|
+ * Removes at least all entries that were mapped at the beginning of this call. Entries added during the call may
+ * not be removed. As a result, all entries are only guaranteed to be removed by the end of the call if the map is
+ * not modified while the call is in progress.
|
|
+ *
|
|
+ * <p>
|
|
+ * This function is not atomic.
|
|
+ * </p>
|
|
+ */
|
|
+ public void clear() {
|
|
+ // it is possible to optimise this to directly interact with the table,
|
|
+ // but we do need to be careful when interacting with resized tables,
|
|
+ // and the NodeIterator already does this logic
|
|
+ final NodeIterator<V> nodeIterator = new NodeIterator<>(this.table);
|
|
+
|
|
+ TableEntry<V> node;
|
|
+ while ((node = nodeIterator.findNext()) != null) {
|
|
+ this.remove(node.key);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns an iterator over the entries in this map. The iterator is only guaranteed to see entries that were
|
|
+ * added before the beginning of this call, but it may see entries added during.
|
|
+ */
|
|
+ public Iterator<TableEntry<V>> entryIterator() {
|
|
+ return new EntryIterator<>(this);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public final Iterator<TableEntry<V>> iterator() {
|
|
+ return this.entryIterator();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns an iterator over the keys in this map. The iterator is only guaranteed to see keys that were
|
|
+ * added before the beginning of this call, but it may see keys added during.
|
|
+ */
|
|
+ public PrimitiveIterator.OfLong keyIterator() {
|
|
+ return new KeyIterator<>(this);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns an iterator over the values in this map. The iterator is only guaranteed to see values that were
|
|
+ * added before the beginning of this call, but it may see values added during.
|
|
+ */
|
|
+ public Iterator<V> valueIterator() {
|
|
+ return new ValueIterator<>(this);
|
|
+ }
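+ /*
+ * Usage sketch for the iterators: traversal is weakly consistent, never throws ConcurrentModificationException,
+ * and is guaranteed to observe entries present before the iterator was created; concurrent insertions may or
+ * may not be seen.
+ *
+ *   for (final PrimitiveIterator.OfLong keys = map.keyIterator(); keys.hasNext();) {
+ *       final long key = keys.nextLong();
+ *       // inspect or remove via map.remove(key) as needed
+ *   }
+ */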
+
|
|
+ protected static final class EntryIterator<V> extends BaseIteratorImpl<V, TableEntry<V>> {
|
|
+
|
|
+ public EntryIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public TableEntry<V> next() throws NoSuchElementException {
|
|
+ return this.nextNode();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEachRemaining(final Consumer<? super TableEntry<V>> action) {
|
|
+ Validate.notNull(action, "Action may not be null");
|
|
+ while (this.hasNext()) {
|
|
+ action.accept(this.next());
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class KeyIterator<V> extends BaseIteratorImpl<V, Long> implements PrimitiveIterator.OfLong {
|
|
+
|
|
+ public KeyIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Long next() throws NoSuchElementException {
|
|
+ return Long.valueOf(this.nextNode().key);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public long nextLong() {
|
|
+ return this.nextNode().key;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEachRemaining(final Consumer<? super Long> action) {
|
|
+ Validate.notNull(action, "Action may not be null");
|
|
+
|
|
+ if (action instanceof LongConsumer longConsumer) {
|
|
+ this.forEachRemaining(longConsumer);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ while (this.hasNext()) {
|
|
+ action.accept(this.next());
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEachRemaining(final LongConsumer action) {
|
|
+ Validate.notNull(action, "Action may not be null");
|
|
+ while (this.hasNext()) {
|
|
+ action.accept(this.nextLong());
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class ValueIterator<V> extends BaseIteratorImpl<V, V> {
|
|
+
|
|
+ public ValueIterator(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public V next() throws NoSuchElementException {
|
|
+ return this.nextNode().getValueVolatile();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEachRemaining(final Consumer<? super V> action) {
|
|
+ Validate.notNull(action, "Action may not be null");
|
|
+ while (this.hasNext()) {
|
|
+ action.accept(this.next());
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static abstract class BaseIteratorImpl<V, T> extends NodeIterator<V> implements Iterator<T> {
|
|
+
|
|
+ protected final ConcurrentLong2ReferenceChainedHashTable<V> map;
|
|
+ protected TableEntry<V> lastReturned;
|
|
+ protected TableEntry<V> nextToReturn;
|
|
+
|
|
+ protected BaseIteratorImpl(final ConcurrentLong2ReferenceChainedHashTable<V> map) {
|
|
+ super(map.table);
|
|
+ this.map = map;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public final boolean hasNext() {
|
|
+ if (this.nextToReturn != null) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return (this.nextToReturn = this.findNext()) != null;
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<V> nextNode() throws NoSuchElementException {
|
|
+ TableEntry<V> ret = this.nextToReturn;
|
|
+ if (ret != null) {
|
|
+ this.lastReturned = ret;
|
|
+ this.nextToReturn = null;
|
|
+ return ret;
|
|
+ }
|
|
+ ret = this.findNext();
|
|
+ if (ret != null) {
|
|
+ this.lastReturned = ret;
|
|
+ return ret;
|
|
+ }
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public final void remove() {
|
|
+ final TableEntry<V> lastReturned = this.lastReturned;
|
|
+ if (lastReturned == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+ this.lastReturned = null;
|
|
+ this.map.remove(lastReturned.key);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public abstract T next() throws NoSuchElementException;
|
|
+
|
|
+ // overridden by subclasses to avoid indirection on hasNext() and next()
|
|
+ @Override
|
|
+ public abstract void forEachRemaining(final Consumer<? super T> action);
|
|
+ }
|
|
+
|
|
+ protected static class NodeIterator<V> {
|
|
+
|
|
+ protected TableEntry<V>[] currentTable;
|
|
+ protected ResizeChain<V> resizeChain;
|
|
+ protected TableEntry<V> last;
|
|
+ protected int nextBin;
|
|
+ protected int increment;
|
|
+
|
|
+ protected NodeIterator(final TableEntry<V>[] baseTable) {
|
|
+ this.currentTable = baseTable;
|
|
+ this.increment = 1;
|
|
+ }
|
|
+
|
|
+ private TableEntry<V>[] pullResizeChain(final int index) {
|
|
+ final ResizeChain<V> resizeChain = this.resizeChain;
|
|
+ if (resizeChain == null) {
|
|
+ this.currentTable = null;
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final ResizeChain<V> prevChain = resizeChain.prev;
|
|
+ this.resizeChain = prevChain;
|
|
+ if (prevChain == null) {
|
|
+ this.currentTable = null;
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final TableEntry<V>[] newTable = prevChain.table;
|
|
+
|
|
+ // we recover the original index by modding by the new table length, as the increments applied to the index
|
|
+ // are a multiple of the new table's length
|
|
+ int newIdx = index & (newTable.length - 1);
|
|
+
|
|
+ // the increment is always the previous table's length
|
|
+ final ResizeChain<V> nextPrevChain = prevChain.prev;
|
|
+ final int increment;
|
|
+ if (nextPrevChain == null) {
|
|
+ increment = 1;
|
|
+ } else {
|
|
+ increment = nextPrevChain.table.length;
|
|
+ }
|
|
+
|
|
+ // done with the upper table, so we can skip the resize node
|
|
+ newIdx += increment;
|
|
+
|
|
+ this.increment = increment;
|
|
+ this.nextBin = newIdx;
|
|
+ this.currentTable = newTable;
|
|
+
|
|
+ return newTable;
|
|
+ }
|
|
+
|
|
+ private TableEntry<V>[] pushResizeChain(final TableEntry<V>[] table, final TableEntry<V> entry) {
|
|
+ final ResizeChain<V> chain = this.resizeChain;
|
|
+
|
|
+ if (chain == null) {
|
|
+ // noinspection unchecked
|
|
+ final TableEntry<V>[] nextTable = (TableEntry<V>[])entry.getValuePlain();
|
|
+
|
|
+ final ResizeChain<V> oldChain = new ResizeChain<>(table, null, null);
|
|
+ final ResizeChain<V> currChain = new ResizeChain<>(nextTable, oldChain, null);
|
|
+ oldChain.next = currChain;
|
|
+
|
|
+ this.increment = table.length;
|
|
+ this.resizeChain = currChain;
|
|
+ this.currentTable = nextTable;
|
|
+
|
|
+ return nextTable;
|
|
+ } else {
|
|
+ ResizeChain<V> currChain = chain.next;
|
|
+ if (currChain == null) {
|
|
+ // noinspection unchecked
|
|
+ final TableEntry<V>[] ret = (TableEntry<V>[])entry.getValuePlain();
|
|
+ currChain = new ResizeChain<>(ret, chain, null);
|
|
+ chain.next = currChain;
|
|
+
|
|
+ this.increment = table.length;
|
|
+ this.resizeChain = currChain;
|
|
+ this.currentTable = ret;
|
|
+
|
|
+ return ret;
|
|
+ } else {
|
|
+ this.increment = table.length;
|
|
+ this.resizeChain = currChain;
|
|
+ return this.currentTable = currChain.table;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<V> findNext() {
|
|
+ for (;;) {
|
|
+ final TableEntry<V> last = this.last;
|
|
+ if (last != null) {
|
|
+ final TableEntry<V> next = last.getNextVolatile();
|
|
+ if (next != null) {
|
|
+ this.last = next;
|
|
+ if (next.getValuePlain() == null) {
|
|
+ // compute() node not yet available
|
|
+ continue;
|
|
+ }
|
|
+ return next;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ TableEntry<V>[] table = this.currentTable;
|
|
+
|
|
+ if (table == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ int idx = this.nextBin;
|
|
+ int increment = this.increment;
|
|
+ for (;;) {
|
|
+ if (idx >= table.length) {
|
|
+ table = this.pullResizeChain(idx);
|
|
+ idx = this.nextBin;
|
|
+ increment = this.increment;
|
|
+ if (table != null) {
|
|
+ continue;
|
|
+ } else {
|
|
+ this.last = null;
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ final TableEntry<V> entry = getAtIndexVolatile(table, idx);
|
|
+ if (entry == null) {
|
|
+ idx += increment;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (entry.resize) {
|
|
+ // push onto resize chain
|
|
+ table = this.pushResizeChain(table, entry);
|
|
+ increment = this.increment;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ this.last = entry;
|
|
+ this.nextBin = idx + increment;
|
|
+ if (entry.getValuePlain() != null) {
|
|
+ return entry;
|
|
+ } else {
|
|
+ // compute() node not yet available
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class ResizeChain<V> {
|
|
+
|
|
+ public final TableEntry<V>[] table;
|
|
+ public final ResizeChain<V> prev;
|
|
+ public ResizeChain<V> next;
|
|
+
|
|
+ public ResizeChain(final TableEntry<V>[] table, final ResizeChain<V> prev, final ResizeChain<V> next) {
|
|
+ this.table = table;
|
|
+ this.prev = prev;
|
|
+ this.next = next;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class TableEntry<V> {
|
|
+
|
|
+ private static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
|
|
+
|
|
+ private final boolean resize;
|
|
+
|
|
+ private final long key;
|
|
+
|
|
+ private volatile V value;
|
|
+ private static final VarHandle VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", Object.class);
|
|
+
|
|
+ private V getValuePlain() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ private V getValueAcquire() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ private V getValueVolatile() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ private void setValuePlain(final V value) {
|
|
+ VALUE_HANDLE.set(this, (Object)value);
|
|
+ }
|
|
+
|
|
+ private void setValueRelease(final V value) {
|
|
+ VALUE_HANDLE.setRelease(this, (Object)value);
|
|
+ }
|
|
+
|
|
+ private void setValueVolatile(final V value) {
|
|
+ VALUE_HANDLE.setVolatile(this, (Object)value);
|
|
+ }
|
|
+
|
|
+ private volatile TableEntry<V> next;
|
|
+ private static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
|
|
+
|
|
+ private TableEntry<V> getNextPlain() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)NEXT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ private TableEntry<V> getNextVolatile() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)NEXT_HANDLE.getVolatile(this);
|
|
+ }
|
|
+
|
|
+ private void setNextPlain(final TableEntry<V> next) {
|
|
+ NEXT_HANDLE.set(this, next);
|
|
+ }
|
|
+
|
|
+ private void setNextRelease(final TableEntry<V> next) {
|
|
+ NEXT_HANDLE.setRelease(this, next);
|
|
+ }
|
|
+
|
|
+ private void setNextVolatile(final TableEntry<V> next) {
|
|
+ NEXT_HANDLE.setVolatile(this, next);
|
|
+ }
|
|
+
|
|
+ public TableEntry(final long key, final V value) {
|
|
+ this.resize = false;
|
|
+ this.key = key;
|
|
+ this.setValuePlain(value);
|
|
+ }
|
|
+
|
|
+ public TableEntry(final long key, final V value, final boolean resize) {
|
|
+ this.resize = resize;
|
|
+ this.key = key;
|
|
+ this.setValuePlain(value);
|
|
+ }
|
|
+
|
|
+ public long getKey() {
|
|
+ return this.key;
|
|
+ }
|
|
+
|
|
+ public V getValue() {
|
|
+ return this.getValueVolatile();
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..83965350d292ccf42a34520d84dcda3f88146cff
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRHashTable.java
|
|
@@ -0,0 +1,1656 @@
|
|
+package ca.spottedleaf.concurrentutil.map;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.util.CollectionUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.HashUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.IntegerUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Validate;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.ArrayList;
|
|
+import java.util.Arrays;
|
|
+import java.util.Collection;
|
|
+import java.util.Iterator;
|
|
+import java.util.List;
|
|
+import java.util.Map;
|
|
+import java.util.NoSuchElementException;
|
|
+import java.util.Set;
|
|
+import java.util.Spliterator;
|
|
+import java.util.Spliterators;
|
|
+import java.util.function.BiConsumer;
|
|
+import java.util.function.BiFunction;
|
|
+import java.util.function.BiPredicate;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.Function;
|
|
+import java.util.function.IntFunction;
|
|
+import java.util.function.Predicate;
|
|
+
|
|
+/**
|
|
+ * <p>
|
|
+ * Note: Not really tested, use at your own risk.
|
|
+ * </p>
|
|
+ * This map is safe for reading from multiple threads; however, it is only safe to write to it from a single thread.
+ * {@code null} keys or values are not permitted. Writes to values in this map are ordered by release semantics,
+ * but immediate visibility to other threads is not guaranteed; writes are only guaranteed to become visible eventually.
+ * Reads are ordered by acquire semantics.
+ * <p>
+ * Iterators must not be used concurrently with writes, and an iterator's backing map must not be modified while
+ * iterating. Iterators make no fail-fast attempt, so modifying the backing map while iterating results in undefined
+ * behaviour.
|
|
+ * </p>
|
|
+ * <p>
|
|
+ * Subclasses should override {@link #clone()} to return correct instances of this class.
|
|
+ * </p>
|
|
+ * @param <K> {@inheritDoc}
|
|
+ * @param <V> {@inheritDoc}
|
|
+ */
|
|
+public class SWMRHashTable<K, V> implements Map<K, V>, Iterable<Map.Entry<K, V>> {
|
|
+
|
|
+ protected int size;
|
|
+
|
|
+ protected TableEntry<K, V>[] table;
|
|
+
|
|
+ protected final float loadFactor;
|
|
+
|
|
+ protected static final VarHandle SIZE_HANDLE = ConcurrentUtil.getVarHandle(SWMRHashTable.class, "size", int.class);
|
|
+ protected static final VarHandle TABLE_HANDLE = ConcurrentUtil.getVarHandle(SWMRHashTable.class, "table", TableEntry[].class);
|
|
+
|
|
+ /* size */
|
|
+
|
|
+ protected final int getSizePlain() {
|
|
+ return (int)SIZE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final int getSizeOpaque() {
|
|
+ return (int)SIZE_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ protected final int getSizeAcquire() {
|
|
+ return (int)SIZE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setSizePlain(final int value) {
|
|
+ SIZE_HANDLE.set(this, value);
|
|
+ }
|
|
+
|
|
+ protected final void setSizeOpaque(final int value) {
|
|
+ SIZE_HANDLE.setOpaque(this, value);
|
|
+ }
|
|
+
|
|
+ protected final void setSizeRelease(final int value) {
|
|
+ SIZE_HANDLE.setRelease(this, value);
|
|
+ }
|
|
+
|
|
+ /* table */
|
|
+
|
|
+ protected final TableEntry<K, V>[] getTablePlain() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<K, V>[])TABLE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<K, V>[] getTableAcquire() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<K, V>[])TABLE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setTablePlain(final TableEntry<K, V>[] table) {
|
|
+ TABLE_HANDLE.set(this, table);
|
|
+ }
|
|
+
|
|
+ protected final void setTableRelease(final TableEntry<K, V>[] table) {
|
|
+ TABLE_HANDLE.setRelease(this, table);
|
|
+ }
|
|
+
|
|
+ protected static final int DEFAULT_CAPACITY = 16;
|
|
+ protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
|
|
+ protected static final int MAXIMUM_CAPACITY = Integer.MIN_VALUE >>> 1;
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a capacity of {@code 16} and load factor of {@code 0.75f}.
|
|
+ */
|
|
+ public SWMRHashTable() {
|
|
+ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with the specified capacity and load factor of {@code 0.75f}.
|
|
+ * @param capacity specified initial capacity, > 0
|
|
+ */
|
|
+ public SWMRHashTable(final int capacity) {
|
|
+ this(capacity, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with the specified capacity and load factor.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param loadFactor specified load factor, > 0 && finite
|
|
+ */
|
|
+ public SWMRHashTable(final int capacity, final float loadFactor) {
|
|
+ final int tableSize = getCapacityFor(capacity);
|
|
+
|
|
+ if (loadFactor <= 0.0 || !Float.isFinite(loadFactor)) {
|
|
+ throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
|
|
+ }
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<K, V>[] table = new TableEntry[tableSize];
|
|
+ this.setTablePlain(table);
|
|
+
|
|
+ if (tableSize == MAXIMUM_CAPACITY) {
|
|
+ this.threshold = -1;
|
|
+ } else {
|
|
+ this.threshold = getTargetCapacity(tableSize, loadFactor);
|
|
+ }
|
|
+
|
|
+ this.loadFactor = loadFactor;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a capacity of {@code 16} or the specified map's size, whichever is larger, and
|
|
+ * with a load factor of {@code 0.75f}.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRHashTable(final Map<K, V> other) {
|
|
+ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, other);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a minimum capacity of the specified capacity or the specified map's size, whichever is larger, and
|
|
+ * with a load factor of {@code 0.75f}.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRHashTable(final int capacity, final Map<K, V> other) {
|
|
+ this(capacity, DEFAULT_LOAD_FACTOR, other);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a min capacity of the specified capacity or the specified map's size, whichever is larger, and
|
|
+ * with the specified load factor.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param loadFactor specified load factor, > 0 && finite
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRHashTable(final int capacity, final float loadFactor, final Map<K, V> other) {
|
|
+ this(Math.max(Validate.notNull(other, "Null map").size(), capacity), loadFactor);
|
|
+ this.putAll(other);
|
|
+ }
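+ /*
+ * Usage sketch for the intended threading model: exactly one writer thread mutates the table while any number
+ * of reader threads call get()/containsKey() concurrently. "Config" and the names below are hypothetical.
+ *
+ *   final SWMRHashTable<String, Config> configs = new SWMRHashTable<>();
+ *
+ *   // single writer thread:
+ *   configs.put("render-distance", loadedConfig);
+ *
+ *   // any reader thread:
+ *   final Config value = configs.get("render-distance"); // acquire-ordered read; may briefly lag the writer
+ */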
+
|
|
+ protected static <K, V> TableEntry<K, V> getAtIndexOpaque(final TableEntry<K, V>[] table, final int index) {
|
|
+ // noinspection unchecked
|
|
+ return (TableEntry<K, V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getOpaque(table, index);
|
|
+ }
|
|
+
|
|
+ protected static <K, V> void setAtIndexRelease(final TableEntry<K, V>[] table, final int index, final TableEntry<K, V> value) {
|
|
+ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
|
|
+ }
|
|
+
|
|
+ public final float getLoadFactor() {
|
|
+ return this.loadFactor;
|
|
+ }
|
|
+
|
|
+ protected static int getCapacityFor(final int capacity) {
|
|
+ if (capacity <= 0) {
|
|
+ throw new IllegalArgumentException("Invalid capacity: " + capacity);
|
|
+ }
|
|
+ if (capacity >= MAXIMUM_CAPACITY) {
|
|
+ return MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ return IntegerUtil.roundCeilLog2(capacity);
|
|
+ }
|
|
+
|
|
+ /** Callers must still use acquire when reading the value of the entry. */
|
|
+ protected final TableEntry<K, V> getEntryForOpaque(final K key) {
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
|
|
+ if (hash == curr.hash && (key == curr.key || curr.key.equals(key))) {
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<K, V> getEntryForPlain(final K key) {
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+
|
|
+ for (TableEntry<K, V> curr = table[hash & (table.length - 1)]; curr != null; curr = curr.getNextPlain()) {
|
|
+ if (hash == curr.hash && (key == curr.key || curr.key.equals(key))) {
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /* MT-Safe */
|
|
+
|
|
+ /** must be deterministic given a key */
|
|
+ private static int getHash(final Object key) {
|
|
+ int hash = key == null ? 0 : key.hashCode();
|
|
+ return HashUtil.mix(hash);
|
|
+ }
|
|
+
|
|
+ // returns -1 if capacity*loadFactor is too large
|
|
+ protected static int getTargetCapacity(final int capacity, final float loadFactor) {
|
|
+ final double ret = (double)capacity * (double)loadFactor;
|
|
+ if (Double.isInfinite(ret) || ret >= ((double)Integer.MAX_VALUE)) {
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return (int)ret;
|
|
+ }
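+ /*
+ * Example: with the defaults (table size 16, load factor 0.75f) this yields 16 * 0.75 = 12, so checkResize()
+ * below grows the table once a 13th entry is added; a threshold of -1 (used at MAXIMUM_CAPACITY) disables
+ * further resizing.
+ */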
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean equals(final Object obj) {
|
|
+ if (this == obj) {
|
|
+ return true;
|
|
+ }
|
|
+ /* Make no attempt to deal with concurrent modifications */
|
|
+ if (!(obj instanceof Map<?, ?> other)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (this.size() != other.size()) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ final Object otherValue = other.get(curr.key);
|
|
+ if (otherValue == null || (value != otherValue && !value.equals(otherValue))) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public int hashCode() {
|
|
+ /* Make no attempt to deal with concurrent modifications */
|
|
+ int hash = 0;
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ hash += curr.hashCode();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return hash;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ final StringBuilder builder = new StringBuilder(64);
|
|
+ builder.append("SWMRHashTable:{");
|
|
+
|
|
+ this.forEach((final K key, final V value) -> {
|
|
+ builder.append("{key: \"").append(key).append("\", value: \"").append(value).append("\"}");
|
|
+ });
|
|
+
|
|
+ return builder.append('}').toString();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public SWMRHashTable<K, V> clone() {
|
|
+ return new SWMRHashTable<>(this.getTableAcquire().length, this.loadFactor, this);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public Iterator<Map.Entry<K, V>> iterator() {
|
|
+ return new EntryIterator<>(this.getTableAcquire(), this);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void forEach(final Consumer<? super Map.Entry<K, V>> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ action.accept(curr);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void forEach(final BiConsumer<? super K, ? super V> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ action.accept(curr.key, value);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Provides the specified consumer with all keys contained within this map.
|
|
+ * @param action The specified consumer.
|
|
+ */
|
|
+ public void forEachKey(final Consumer<? super K> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ action.accept(curr.key);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Provides the specified consumer with all values contained within this map. Equivalent to {@code map.values().forEach(Consumer)}.
|
|
+ * @param action The specified consumer.
|
|
+ */
|
|
+ public void forEachValue(final Consumer<? super V> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ action.accept(value);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V get(final Object key) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<K, V> entry = this.getEntryForOpaque((K)key);
|
|
+ return entry == null ? null : entry.getValueAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean containsKey(final Object key) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ // note: we need to use getValueAcquire, so that the reads from this map are ordered by acquire semantics
|
|
+ return this.get(key) != null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Returns {@code true} if this map contains an entry with the specified key and value at some point during this call.
|
|
+ * @param key The specified key.
|
|
+ * @param value The specified value.
|
|
+ * @return {@code true} if this map contains an entry with the specified key and value.
|
|
+ */
|
|
+ public boolean contains(final Object key, final Object value) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<K, V> entry = this.getEntryForOpaque((K)key);
|
|
+
|
|
+ if (entry == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final V entryVal = entry.getValueAcquire();
|
|
+ return entryVal == value || entryVal.equals(value);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean containsValue(final Object value) {
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V currVal = curr.getValueAcquire();
|
|
+ if (currVal == value || currVal.equals(value)) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V getOrDefault(final Object key, final V defaultValue) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<K, V> entry = this.getEntryForOpaque((K)key);
|
|
+
|
|
+ return entry == null ? defaultValue : entry.getValueAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public int size() {
|
|
+ return this.getSizeAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean isEmpty() {
|
|
+ return this.getSizeAcquire() == 0;
|
|
+ }
|
|
+
|
|
+ protected KeySet<K, V> keyset;
|
|
+ protected ValueCollection<K, V> values;
|
|
+ protected EntrySet<K, V> entrySet;
|
|
+
|
|
+ @Override
|
|
+ public Set<K> keySet() {
|
|
+ return this.keyset == null ? this.keyset = new KeySet<>(this) : this.keyset;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Collection<V> values() {
|
|
+ return this.values == null ? this.values = new ValueCollection<>(this) : this.values;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Set<Map.Entry<K, V>> entrySet() {
|
|
+ return this.entrySet == null ? this.entrySet = new EntrySet<>(this) : this.entrySet;
|
|
+ }
|
|
+
|
|
+ /* Non-MT-Safe */
|
|
+
|
|
+ protected int threshold;
|
|
+
|
|
+ protected final void checkResize(final int minCapacity) {
|
|
+ if (minCapacity <= this.threshold || this.threshold < 0) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ int newCapacity = minCapacity >= MAXIMUM_CAPACITY ? MAXIMUM_CAPACITY : IntegerUtil.roundCeilLog2(minCapacity);
|
|
+ if (newCapacity < 0) {
|
|
+ newCapacity = MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ if (newCapacity <= table.length) {
|
|
+ if (newCapacity == MAXIMUM_CAPACITY) {
|
|
+ return;
|
|
+ }
|
|
+ newCapacity = table.length << 1;
|
|
+ }
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<K, V>[] newTable = new TableEntry[newCapacity];
|
|
+ final int indexMask = newCapacity - 1;
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> entry = table[i]; entry != null; entry = entry.getNextPlain()) {
|
|
+ final int hash = entry.hash;
|
|
+ final int index = hash & indexMask;
|
|
+
|
|
+ /* we need to create a new entry since there could be reading threads */
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, entry.key, entry.getValuePlain());
|
|
+
|
|
+ final TableEntry<K, V> prev = newTable[index];
|
|
+
|
|
+ newTable[index] = insert;
|
|
+ insert.setNextPlain(prev);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (newCapacity == MAXIMUM_CAPACITY) {
|
|
+ this.threshold = -1; /* No more resizing */
|
|
+ } else {
|
|
+ this.threshold = getTargetCapacity(newCapacity, this.loadFactor);
|
|
+ }
|
|
+ this.setTableRelease(newTable); /* use release to publish entries in table */
|
|
+ }
|
|
+
|
|
+ protected final int addToSize(final int num) {
|
|
+ final int newSize = this.getSizePlain() + num;
|
|
+
|
|
+ this.setSizeOpaque(newSize);
|
|
+ this.checkResize(newSize);
|
|
+
|
|
+ return newSize;
|
|
+ }
|
|
+
|
|
+ protected final int removeFromSize(final int num) {
|
|
+ final int newSize = this.getSizePlain() - num;
|
|
+
|
|
+ this.setSizeOpaque(newSize);
|
|
+
|
|
+ return newSize;
|
|
+ }
|
|
+
|
|
+ /* Cannot be used to perform downsizing */
|
|
+ protected final int removeFromSizePlain(final int num) {
|
|
+ final int newSize = this.getSizePlain() - num;
|
|
+
|
|
+ this.setSizePlain(newSize);
|
|
+
|
|
+ return newSize;
|
|
+ }
|
|
+
|
|
+ protected final V put(final K key, final V value, final boolean onlyIfAbsent) {
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ final TableEntry<K, V> head = table[index];
|
|
+ if (head == null) {
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, key, value);
|
|
+ setAtIndexRelease(table, index, insert);
|
|
+ this.addToSize(1);
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ for (TableEntry<K, V> curr = head;;) {
|
|
+ if (curr.hash == hash && (key == curr.key || curr.key.equals(key))) {
|
|
+ if (onlyIfAbsent) {
|
|
+ return curr.getValuePlain();
|
|
+ }
|
|
+
|
|
+ final V currVal = curr.getValuePlain();
|
|
+ curr.setValueRelease(value);
|
|
+ return currVal;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V> next = curr.getNextPlain();
|
|
+ if (next != null) {
|
|
+ curr = next;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, key, value);
|
|
+
|
|
+ curr.setNextRelease(insert);
|
|
+ this.addToSize(1);
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Removes every key-value pair from this map for which the specified predicate returns true. The predicate is
+ * tested against every entry in this map. Returns the number of key-value pairs removed.
|
|
+ * @param predicate The predicate to test key-value pairs against.
|
|
+ * @return The total number of key-value pairs removed from this map.
|
|
+ */
|
|
+ public int removeIf(final BiPredicate<K, V> predicate) {
|
|
+ Validate.notNull(predicate, "Null predicate");
|
|
+
|
|
+ int removed = 0;
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+
|
|
+ bin_iteration_loop:
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ TableEntry<K, V> curr = table[i];
|
|
+ if (curr == null) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Handle bin nodes first */
|
|
+ while (predicate.test(curr.key, curr.getValuePlain())) {
|
|
+ ++removed;
|
|
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
|
|
+
|
|
+ setAtIndexRelease(table, i, curr = curr.getNextPlain());
|
|
+
|
|
+ if (curr == null) {
|
|
+ continue bin_iteration_loop;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ TableEntry<K, V> prev;
|
|
+
|
|
+ /* curr at this point is the bin node */
|
|
+
|
|
+ for (prev = curr, curr = curr.getNextPlain(); curr != null;) {
|
|
+ /* If we want to remove, then we should hold prev, as it will be a valid entry to link on */
|
|
+ if (predicate.test(curr.key, curr.getValuePlain())) {
|
|
+ ++removed;
|
|
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
|
|
+
|
|
+ prev.setNextRelease(curr = curr.getNextPlain());
|
|
+ } else {
|
|
+ prev = curr;
|
|
+ curr = curr.getNextPlain();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return removed;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Removes every key-value pair from this map for which the specified predicate returns true. The predicate is
+ * tested against every entry in this map. Returns the number of key-value pairs removed.
|
|
+ * @param predicate The predicate to test key-value pairs against.
|
|
+ * @return The total number of key-value pairs removed from this map.
|
|
+ */
|
|
+ public int removeEntryIf(final Predicate<? super Map.Entry<K, V>> predicate) {
|
|
+ Validate.notNull(predicate, "Null predicate");
|
|
+
|
|
+ int removed = 0;
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+
|
|
+ bin_iteration_loop:
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ TableEntry<K, V> curr = table[i];
|
|
+ if (curr == null) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Handle bin nodes first */
|
|
+ while (predicate.test(curr)) {
|
|
+ ++removed;
|
|
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
|
|
+
|
|
+ setAtIndexRelease(table, i, curr = curr.getNextPlain());
|
|
+
|
|
+ if (curr == null) {
|
|
+ continue bin_iteration_loop;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ TableEntry<K, V> prev;
|
|
+
|
|
+ /* curr at this point is the bin node */
|
|
+
|
|
+ for (prev = curr, curr = curr.getNextPlain(); curr != null;) {
|
|
+ /* If we want to remove, then we should hold prev, as it will be a valid entry to link on */
|
|
+ if (predicate.test(curr)) {
|
|
+ ++removed;
|
|
+ this.removeFromSizePlain(1); /* required in case predicate throws an exception */
|
|
+
|
|
+ prev.setNextRelease(curr = curr.getNextPlain());
|
|
+ } else {
|
|
+ prev = curr;
|
|
+ curr = curr.getNextPlain();
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return removed;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V put(final K key, final V value) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ return this.put(key, value, false);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V putIfAbsent(final K key, final V value) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ return this.put(key, value, true);
|
|
+ }
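+
+ // Illustrative contrast of put and putIfAbsent, assuming a SWMRHashTable<String, Integer> named "scores":
+ //   scores.put("a", Integer.valueOf(1));         // inserts the mapping, returns null
+ //   scores.put("a", Integer.valueOf(2));         // overwrites, returns the previous value 1
+ //   scores.putIfAbsent("a", Integer.valueOf(3)); // keeps 2 and returns it, no overwrite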
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean remove(final Object key, final Object value) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ final TableEntry<K, V> head = table[index];
|
|
+ if (head == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (head.hash == hash && (head.key == key || head.key.equals(key))) {
|
|
+ final V currVal = head.getValuePlain();
|
|
+
|
|
+ if (currVal != value && !currVal.equals(value)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ setAtIndexRelease(table, index, head.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ for (TableEntry<K, V> curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr.hash == hash && (curr.key == key || curr.key.equals(key))) {
|
|
+ final V currVal = curr.getValuePlain();
|
|
+
|
|
+ if (currVal != value && !currVal.equals(value)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
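+
+ // e.g., assuming a SWMRHashTable<String, Integer> named "scores", scores.remove("a", Integer.valueOf(2))
+ // only unlinks the mapping when it is still "a" -> 2, returning whether the removal happened.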
+
|
|
+ protected final V remove(final Object key, final int hash) {
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int index = (table.length - 1) & hash;
|
|
+
|
|
+ final TableEntry<K, V> head = table[index];
|
|
+ if (head == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (hash == head.hash && (head.key == key || head.key.equals(key))) {
|
|
+ setAtIndexRelease(table, index, head.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return head.getValuePlain();
|
|
+ }
|
|
+
|
|
+ for (TableEntry<K, V> curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr.hash == hash && (key == curr.key || curr.key.equals(key))) {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return curr.getValuePlain();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V remove(final Object key) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ return this.remove(key, SWMRHashTable.getHash(key));
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean replace(final K key, final V oldValue, final V newValue) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(oldValue, "Null oldValue");
|
|
+ Validate.notNull(newValue, "Null newValue");
|
|
+
|
|
+ final TableEntry<K, V> entry = this.getEntryForPlain(key);
|
|
+ if (entry == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final V currValue = entry.getValuePlain();
|
|
+ if (currValue == oldValue || currValue.equals(oldValue)) {
|
|
+ entry.setValueRelease(newValue);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V replace(final K key, final V value) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ final TableEntry<K, V> entry = this.getEntryForPlain(key);
|
|
+ if (entry == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final V prev = entry.getValuePlain();
|
|
+ entry.setValueRelease(value);
|
|
+ return prev;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void replaceAll(final BiFunction<? super K, ? super V, ? extends V> function) {
|
|
+ Validate.notNull(function, "Null function");
|
|
+
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<K, V> curr = table[i]; curr != null; curr = curr.getNextPlain()) {
|
|
+ final V value = curr.getValuePlain();
|
|
+
|
|
+ final V newValue = function.apply(curr.key, value);
|
|
+ if (newValue == null) {
|
|
+ throw new NullPointerException();
|
|
+ }
|
|
+
|
|
+ curr.setValueRelease(newValue);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public void putAll(final Map<? extends K, ? extends V> map) {
|
|
+ Validate.notNull(map, "Null map");
|
|
+
|
|
+ final int size = map.size();
|
|
+ this.checkResize(Math.max(this.getSizePlain() + size/2, size)); /* preemptively resize */
|
|
+ map.forEach(this::put);
|
|
+ }
|
|
+
|
|
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This call is non-atomic and the order in which entries are removed is undefined. The clear operation itself
+ * is release ordered; that is, after the clear operation is performed, a release fence is performed.
+ * </p>
+ */
+ @Override
+ public void clear() {
+ Arrays.fill(this.getTablePlain(), null);
+ this.setSizeRelease(0);
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V compute(final K key, final BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(remappingFunction, "Null remappingFunction");
|
|
+
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ for (TableEntry<K, V> curr = table[index], prev = null;; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr == null) {
+ final V newVal = remappingFunction.apply(key, null);
+
|
|
+ if (newVal == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, key, newVal);
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, insert);
|
|
+ } else {
|
|
+ prev.setNextRelease(insert);
|
|
+ }
|
|
+
|
|
+ this.addToSize(1);
|
|
+
|
|
+ return newVal;
|
|
+ }
|
|
+
|
|
+ if (curr.hash == hash && (curr.key == key || curr.key.equals(key))) {
|
|
+ final V newVal = remappingFunction.apply(key, curr.getValuePlain());
|
|
+
|
|
+ if (newVal != null) {
|
|
+ curr.setValueRelease(newVal);
|
|
+ return newVal;
|
|
+ }
|
|
+
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, curr.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ }
|
|
+
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V computeIfPresent(final K key, final BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(remappingFunction, "Null remappingFunction");
|
|
+
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ for (TableEntry<K, V> curr = table[index], prev = null; curr != null; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr.hash != hash || (curr.key != key && !curr.key.equals(key))) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final V newVal = remappingFunction.apply(key, curr.getValuePlain());
|
|
+ if (newVal != null) {
|
|
+ curr.setValueRelease(newVal);
|
|
+ return newVal;
|
|
+ }
|
|
+
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, curr.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ }
|
|
+
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V computeIfAbsent(final K key, final Function<? super K, ? extends V> mappingFunction) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(mappingFunction, "Null mappingFunction");
|
|
+
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ for (TableEntry<K, V> curr = table[index], prev = null;; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr != null) {
|
|
+ if (curr.hash == hash && (curr.key == key || curr.key.equals(key))) {
|
|
+ return curr.getValuePlain();
|
|
+ }
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final V newVal = mappingFunction.apply(key);
|
|
+
|
|
+ if (newVal == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, key, newVal);
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, insert);
|
|
+ } else {
|
|
+ prev.setNextRelease(insert);
|
|
+ }
|
|
+
|
|
+ this.addToSize(1);
|
|
+
|
|
+ return newVal;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public V merge(final K key, final V value, final BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+ Validate.notNull(value, "Null value");
|
|
+ Validate.notNull(remappingFunction, "Null remappingFunction");
|
|
+
|
|
+ final int hash = SWMRHashTable.getHash(key);
|
|
+ final TableEntry<K, V>[] table = this.getTablePlain();
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ for (TableEntry<K, V> curr = table[index], prev = null;; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (curr == null) {
|
|
+ final TableEntry<K, V> insert = new TableEntry<>(hash, key, value);
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, insert);
|
|
+ } else {
|
|
+ prev.setNextRelease(insert);
|
|
+ }
|
|
+
|
|
+ this.addToSize(1);
|
|
+
|
|
+ return value;
|
|
+ }
|
|
+
|
|
+ if (curr.hash == hash && (curr.key == key || curr.key.equals(key))) {
|
|
+ final V newVal = remappingFunction.apply(curr.getValuePlain(), value);
|
|
+
|
|
+ if (newVal != null) {
|
|
+ curr.setValueRelease(newVal);
|
|
+ return newVal;
|
|
+ }
|
|
+
|
|
+ if (prev == null) {
|
|
+ setAtIndexRelease(table, index, curr.getNextPlain());
|
|
+ } else {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ }
|
|
+
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
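+
+ // Illustrative word-count sketch using merge, assuming a SWMRHashTable<String, Integer> named "counts":
+ //   counts.merge(word, Integer.valueOf(1), Integer::sum); // inserts 1, or adds 1 to the existing count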
+
|
|
+ protected static final class TableEntry<K, V> implements Map.Entry<K, V> {
|
|
+
|
|
+ protected static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
|
|
+
|
|
+ protected final int hash;
|
|
+ protected final K key;
|
|
+ protected V value;
|
|
+
|
|
+ protected TableEntry<K, V> next;
|
|
+
|
|
+ protected static final VarHandle VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", Object.class);
|
|
+ protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
|
|
+
|
|
+ /* value */
|
|
+
|
|
+ protected final V getValuePlain() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final V getValueAcquire() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setValueRelease(final V to) {
|
|
+ VALUE_HANDLE.setRelease(this, to);
|
|
+ }
|
|
+
|
|
+ /* next */
|
|
+
|
|
+ protected final TableEntry<K, V> getNextPlain() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<K, V>)NEXT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<K, V> getNextOpaque() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<K, V>)NEXT_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ protected final void setNextPlain(final TableEntry<K, V> next) {
|
|
+ NEXT_HANDLE.set(this, next);
|
|
+ }
|
|
+
|
|
+ protected final void setNextRelease(final TableEntry<K, V> next) {
|
|
+ NEXT_HANDLE.setRelease(this, next);
|
|
+ }
|
|
+
|
|
+ protected TableEntry(final int hash, final K key, final V value) {
|
|
+ this.hash = hash;
|
|
+ this.key = key;
|
|
+ this.value = value;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public K getKey() {
|
|
+ return this.key;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public V getValue() {
|
|
+ return this.getValueAcquire();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public V setValue(final V value) {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ protected static int hash(final Object key, final Object value) {
|
|
+ return key.hashCode() ^ (value == null ? 0 : value.hashCode());
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public int hashCode() {
|
|
+ return hash(this.key, this.getValueAcquire());
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean equals(final Object obj) {
|
|
+ if (this == obj) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (!(obj instanceof Map.Entry<?, ?> other)) {
|
|
+ return false;
|
|
+ }
|
|
+ final Object otherKey = other.getKey();
|
|
+ final Object otherValue = other.getValue();
|
|
+
|
|
+ final K thisKey = this.getKey();
|
|
+ final V thisVal = this.getValueAcquire();
|
|
+ return (thisKey == otherKey || thisKey.equals(otherKey)) &&
|
|
+ (thisVal == otherValue || thisVal.equals(otherValue));
|
|
+ }
|
|
+ }
|
|
+
|
|
+
|
|
+ protected static abstract class TableEntryIterator<K, V, T> implements Iterator<T> {
|
|
+
|
|
+ protected final TableEntry<K, V>[] table;
|
|
+ protected final SWMRHashTable<K, V> map;
|
|
+
|
|
+ /* bin which our current element resides on */
|
|
+ protected int tableIndex;
|
|
+
|
|
+ protected TableEntry<K, V> currEntry; /* curr entry, null if no more to iterate or if curr was removed or if we've just init'd */
|
|
+ protected TableEntry<K, V> nextEntry; /* may not be on the same bin as currEntry */
|
|
+
|
|
+ protected TableEntryIterator(final TableEntry<K, V>[] table, final SWMRHashTable<K, V> map) {
|
|
+ this.table = table;
|
|
+ this.map = map;
|
|
+ int tableIndex = 0;
|
|
+ for (int len = table.length; tableIndex < len; ++tableIndex) {
|
|
+ final TableEntry<K, V> entry = getAtIndexOpaque(table, tableIndex);
|
|
+ if (entry != null) {
|
|
+ this.nextEntry = entry;
|
|
+ this.tableIndex = tableIndex + 1;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ this.tableIndex = tableIndex;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean hasNext() {
|
|
+ return this.nextEntry != null;
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<K, V> advanceEntry() {
|
|
+ final TableEntry<K, V>[] table = this.table;
|
|
+ final int tableLength = table.length;
|
|
+ int tableIndex = this.tableIndex;
|
|
+ final TableEntry<K, V> curr = this.nextEntry;
|
|
+ if (curr == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ this.currEntry = curr;
|
|
+
|
|
+ // set up nextEntry
|
|
+
|
|
+ // find next in chain
|
|
+ TableEntry<K, V> next = curr.getNextOpaque();
|
|
+
|
|
+ if (next != null) {
|
|
+ this.nextEntry = next;
|
|
+ return curr;
|
|
+ }
|
|
+
|
|
+ // nothing in chain, so find next available bin
|
|
+ for (;tableIndex < tableLength; ++tableIndex) {
|
|
+ next = getAtIndexOpaque(table, tableIndex);
|
|
+ if (next != null) {
|
|
+ this.nextEntry = next;
|
|
+ this.tableIndex = tableIndex + 1;
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ this.nextEntry = null;
|
|
+ this.tableIndex = tableIndex;
|
|
+ return curr;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void remove() {
|
|
+ final TableEntry<K, V> curr = this.currEntry;
|
|
+ if (curr == null) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+
|
|
+ this.map.remove(curr.key, curr.hash);
|
|
+
|
|
+ this.currEntry = null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class ValueIterator<K, V> extends TableEntryIterator<K, V, V> {
|
|
+
|
|
+ protected ValueIterator(final TableEntry<K, V>[] table, final SWMRHashTable<K, V> map) {
|
|
+ super(table, map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public V next() {
|
|
+ final TableEntry<K, V> entry = this.advanceEntry();
|
|
+
|
|
+ if (entry == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ return entry.getValueAcquire();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class KeyIterator<K, V> extends TableEntryIterator<K, V, K> {
|
|
+
|
|
+ protected KeyIterator(final TableEntry<K, V>[] table, final SWMRHashTable<K, V> map) {
|
|
+ super(table, map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public K next() {
|
|
+ final TableEntry<K, V> curr = this.advanceEntry();
|
|
+
|
|
+ if (curr == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ return curr.key;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class EntryIterator<K, V> extends TableEntryIterator<K, V, Map.Entry<K, V>> {
|
|
+
|
|
+ protected EntryIterator(final TableEntry<K, V>[] table, final SWMRHashTable<K, V> map) {
|
|
+ super(table, map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Map.Entry<K, V> next() {
|
|
+ final TableEntry<K, V> curr = this.advanceEntry();
|
|
+
|
|
+ if (curr == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static abstract class ViewCollection<K, V, T> implements Collection<T> {
|
|
+
|
|
+ protected final SWMRHashTable<K, V> map;
|
|
+
|
|
+ protected ViewCollection(final SWMRHashTable<K, V> map) {
|
|
+ this.map = map;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean add(final T element) {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean addAll(final Collection<? extends T> collections) {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean removeAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ boolean modified = false;
|
|
+ for (final Object element : collection) {
|
|
+ modified |= this.remove(element);
|
|
+ }
|
|
+ return modified;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public int size() {
|
|
+ return this.map.size();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean isEmpty() {
|
|
+ return this.size() == 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void clear() {
|
|
+ this.map.clear();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean containsAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ for (final Object element : collection) {
|
|
+ if (!this.contains(element)) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Object[] toArray() {
|
|
+ final List<T> list = new ArrayList<>(this.size());
|
|
+
|
|
+ this.forEach(list::add);
|
|
+
|
|
+ return list.toArray();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public <E> E[] toArray(final E[] array) {
|
|
+ final List<T> list = new ArrayList<>(this.size());
|
|
+
|
|
+ this.forEach(list::add);
|
|
+
|
|
+ return list.toArray(array);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public <E> E[] toArray(final IntFunction<E[]> generator) {
|
|
+ final List<T> list = new ArrayList<>(this.size());
|
|
+
|
|
+ this.forEach(list::add);
|
|
+
|
|
+ return list.toArray(generator);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public int hashCode() {
|
|
+ int hash = 0;
|
|
+ for (final T element : this) {
|
|
+ hash += element == null ? 0 : element.hashCode();
|
|
+ }
|
|
+ return hash;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Spliterator<T> spliterator() { // TODO implement
|
|
+ return Spliterators.spliterator(this, Spliterator.NONNULL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static abstract class ViewSet<K, V, T> extends ViewCollection<K, V, T> implements Set<T> {
|
|
+
|
|
+ protected ViewSet(final SWMRHashTable<K, V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean equals(final Object obj) {
|
|
+ if (this == obj) {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ if (!(obj instanceof Set)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final Set<?> other = (Set<?>)obj;
|
|
+ if (other.size() != this.size()) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return this.containsAll(other);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class EntrySet<K, V> extends ViewSet<K, V, Map.Entry<K, V>> implements Set<Map.Entry<K, V>> {
|
|
+
|
|
+ protected EntrySet(final SWMRHashTable<K, V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean remove(final Object object) {
|
|
+ if (!(object instanceof Map.Entry<?, ?> entry)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final Object key;
|
|
+ final Object value;
|
|
+
|
|
+ try {
|
|
+ key = entry.getKey();
|
|
+ value = entry.getValue();
|
|
+ } catch (final IllegalStateException ex) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return this.map.remove(key, value);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean removeIf(final Predicate<? super Map.Entry<K, V>> filter) {
|
|
+ Validate.notNull(filter, "Null filter");
|
|
+
|
|
+ return this.map.removeEntryIf(filter) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean retainAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ return this.map.removeEntryIf((final Map.Entry<K, V> entry) -> {
|
|
+ return !collection.contains(entry);
|
|
+ }) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Iterator<Map.Entry<K, V>> iterator() {
|
|
+ return new EntryIterator<>(this.map.getTableAcquire(), this.map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEach(final Consumer<? super Map.Entry<K, V>> action) {
|
|
+ this.map.forEach(action);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean contains(final Object object) {
|
|
+ if (!(object instanceof Map.Entry<?, ?> entry)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final Object key;
|
|
+ final Object value;
|
|
+
|
|
+ try {
|
|
+ key = entry.getKey();
|
|
+ value = entry.getValue();
|
|
+ } catch (final IllegalStateException ex) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return this.map.contains(key, value);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ return CollectionUtil.toString(this, "SWMRHashTableEntrySet");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class KeySet<K, V> extends ViewSet<K, V, K> {
|
|
+
|
|
+ protected KeySet(final SWMRHashTable<K, V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Iterator<K> iterator() {
|
|
+ return new KeyIterator<>(this.map.getTableAcquire(), this.map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEach(final Consumer<? super K> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ this.map.forEachKey(action);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean contains(final Object key) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ return this.map.containsKey(key);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean remove(final Object key) {
|
|
+ Validate.notNull(key, "Null key");
|
|
+
|
|
+ return this.map.remove(key) != null;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean retainAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ return this.map.removeIf((final K key, final V value) -> {
|
|
+ return !collection.contains(key);
|
|
+ }) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean removeIf(final Predicate<? super K> filter) {
|
|
+ Validate.notNull(filter, "Null filter");
|
|
+
|
|
+ return this.map.removeIf((final K key, final V value) -> {
|
|
+ return filter.test(key);
|
|
+ }) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ return CollectionUtil.toString(this, "SWMRHashTableKeySet");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static final class ValueCollection<K, V> extends ViewSet<K, V, V> implements Collection<V> {
|
|
+
|
|
+ protected ValueCollection(final SWMRHashTable<K, V> map) {
|
|
+ super(map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Iterator<V> iterator() {
|
|
+ return new ValueIterator<>(this.map.getTableAcquire(), this.map);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void forEach(final Consumer<? super V> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ this.map.forEachValue(action);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean contains(final Object object) {
|
|
+ Validate.notNull(object, "Null object");
|
|
+
|
|
+ return this.map.containsValue(object);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean remove(final Object object) {
|
|
+ Validate.notNull(object, "Null object");
|
|
+
|
|
+ final Iterator<V> itr = this.iterator();
|
|
+ while (itr.hasNext()) {
|
|
+ final V val = itr.next();
|
|
+ if (val == object || val.equals(object)) {
|
|
+ itr.remove();
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean removeIf(final Predicate<? super V> filter) {
|
|
+ Validate.notNull(filter, "Null filter");
|
|
+
|
|
+ return this.map.removeIf((final K key, final V value) -> {
|
|
+ return filter.test(value);
|
|
+ }) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public boolean retainAll(final Collection<?> collection) {
|
|
+ Validate.notNull(collection, "Null collection");
|
|
+
|
|
+ return this.map.removeIf((final K key, final V value) -> {
|
|
+ return !collection.contains(value);
|
|
+ }) != 0;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ return CollectionUtil.toString(this, "SWMRHashTableValues");
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..bb301a9f4e3ac919552eef68afc73569d50674db
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
|
|
@@ -0,0 +1,674 @@
|
|
+package ca.spottedleaf.concurrentutil.map;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.function.BiLongObjectConsumer;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.HashUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.IntegerUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.Validate;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.Arrays;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.LongConsumer;
|
|
+
|
|
+// trimmed down version of SWMRHashTable
|
|
+public class SWMRLong2ObjectHashTable<V> {
|
|
+
|
|
+ protected int size;
|
|
+
|
|
+ protected TableEntry<V>[] table;
|
|
+
|
|
+ protected final float loadFactor;
|
|
+
|
|
+ protected static final VarHandle SIZE_HANDLE = ConcurrentUtil.getVarHandle(SWMRLong2ObjectHashTable.class, "size", int.class);
|
|
+ protected static final VarHandle TABLE_HANDLE = ConcurrentUtil.getVarHandle(SWMRLong2ObjectHashTable.class, "table", TableEntry[].class);
|
|
+
|
|
+ /* size */
|
|
+
|
|
+ protected final int getSizePlain() {
|
|
+ return (int)SIZE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final int getSizeOpaque() {
|
|
+ return (int)SIZE_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ protected final int getSizeAcquire() {
|
|
+ return (int)SIZE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setSizePlain(final int value) {
|
|
+ SIZE_HANDLE.set(this, value);
|
|
+ }
|
|
+
|
|
+ protected final void setSizeOpaque(final int value) {
|
|
+ SIZE_HANDLE.setOpaque(this, value);
|
|
+ }
|
|
+
|
|
+ protected final void setSizeRelease(final int value) {
|
|
+ SIZE_HANDLE.setRelease(this, value);
|
|
+ }
|
|
+
|
|
+ /* table */
|
|
+
|
|
+ protected final TableEntry<V>[] getTablePlain() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>[])TABLE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<V>[] getTableAcquire() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>[])TABLE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setTablePlain(final TableEntry<V>[] table) {
|
|
+ TABLE_HANDLE.set(this, table);
|
|
+ }
|
|
+
|
|
+ protected final void setTableRelease(final TableEntry<V>[] table) {
|
|
+ TABLE_HANDLE.setRelease(this, table);
|
|
+ }
|
|
+
|
|
+ protected static final int DEFAULT_CAPACITY = 16;
|
|
+ protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
|
|
+ protected static final int MAXIMUM_CAPACITY = Integer.MIN_VALUE >>> 1;
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a capacity of {@code 16} and load factor of {@code 0.75f}.
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable() {
|
|
+ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with the specified capacity and load factor of {@code 0.75f}.
|
|
+ * @param capacity specified initial capacity, > 0
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable(final int capacity) {
|
|
+ this(capacity, DEFAULT_LOAD_FACTOR);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with the specified capacity and load factor.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param loadFactor specified load factor, > 0 && finite
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable(final int capacity, final float loadFactor) {
|
|
+ final int tableSize = getCapacityFor(capacity);
|
|
+
|
|
+ if (loadFactor <= 0.0 || !Float.isFinite(loadFactor)) {
|
|
+ throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
|
|
+ }
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<V>[] table = new TableEntry[tableSize];
|
|
+ this.setTablePlain(table);
|
|
+
|
|
+ if (tableSize == MAXIMUM_CAPACITY) {
|
|
+ this.threshold = -1;
|
|
+ } else {
|
|
+ this.threshold = getTargetCapacity(tableSize, loadFactor);
|
|
+ }
|
|
+
|
|
+ this.loadFactor = loadFactor;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a capacity of {@code 16} or the specified map's size, whichever is larger, and
|
|
+ * with a load factor of {@code 0.75f}.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable(final SWMRLong2ObjectHashTable<V> other) {
|
|
+ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, other);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a minimum capacity of the specified capacity or the specified map's size, whichever is larger, and
|
|
+ * with a load factor of {@code 0.75f}.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable(final int capacity, final SWMRLong2ObjectHashTable<V> other) {
|
|
+ this(capacity, DEFAULT_LOAD_FACTOR, other);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Constructs this map with a min capacity of the specified capacity or the specified map's size, whichever is larger, and
|
|
+ * with the specified load factor.
|
|
+ * All of the specified map's entries are copied into this map.
|
|
+ * @param capacity specified capacity, > 0
|
|
+ * @param loadFactor specified load factor, > 0 && finite
|
|
+ * @param other The specified map.
|
|
+ */
|
|
+ public SWMRLong2ObjectHashTable(final int capacity, final float loadFactor, final SWMRLong2ObjectHashTable<V> other) {
|
|
+ this(Math.max(Validate.notNull(other, "Null map").size(), capacity), loadFactor);
|
|
+ this.putAll(other);
|
|
+ }
|
|
+
|
|
+ protected static <V> TableEntry<V> getAtIndexOpaque(final TableEntry<V>[] table, final int index) {
|
|
+ // noinspection unchecked
|
|
+ return (TableEntry<V>)TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getOpaque(table, index);
|
|
+ }
|
|
+
|
|
+ protected static <V> void setAtIndexRelease(final TableEntry<V>[] table, final int index, final TableEntry<V> value) {
|
|
+ TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value);
|
|
+ }
|
|
+
|
|
+ public final float getLoadFactor() {
|
|
+ return this.loadFactor;
|
|
+ }
|
|
+
|
|
+ protected static int getCapacityFor(final int capacity) {
|
|
+ if (capacity <= 0) {
|
|
+ throw new IllegalArgumentException("Invalid capacity: " + capacity);
|
|
+ }
|
|
+ if (capacity >= MAXIMUM_CAPACITY) {
|
|
+ return MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ return IntegerUtil.roundCeilLog2(capacity);
|
|
+ }
|
|
+
|
|
+ /** Callers must still use acquire when reading the value of the entry. */
|
|
+ protected final TableEntry<V> getEntryForOpaque(final long key) {
|
|
+ final int hash = SWMRLong2ObjectHashTable.getHash(key);
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
|
|
+ if (key == curr.key) {
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<V> getEntryForPlain(final long key) {
|
|
+ final int hash = SWMRLong2ObjectHashTable.getHash(key);
|
|
+ final TableEntry<V>[] table = this.getTablePlain();
|
|
+
|
|
+ for (TableEntry<V> curr = table[hash & (table.length - 1)]; curr != null; curr = curr.getNextPlain()) {
|
|
+ if (key == curr.key) {
|
|
+ return curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /* MT-Safe */
|
|
+
|
|
+ /** must be deterministic given a key */
|
|
+ protected static int getHash(final long key) {
|
|
+ return (int)HashUtil.mix(key);
|
|
+ }
|
|
+
+ // returns -1 if capacity*loadFactor is too large
+ protected static int getTargetCapacity(final int capacity, final float loadFactor) {
|
|
+ final double ret = (double)capacity * (double)loadFactor;
|
|
+ if (Double.isInfinite(ret) || ret >= ((double)Integer.MAX_VALUE)) {
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ return (int)ret;
|
|
+ }
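+
+ // Example: getTargetCapacity(16, 0.75f) == 12, so a table of capacity 16 with the default load factor
+ // is grown once more than 12 mappings are present (see checkResize below).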
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public boolean equals(final Object obj) {
|
|
+ if (this == obj) {
|
|
+ return true;
|
|
+ }
|
|
+ /* Make no attempt to deal with concurrent modifications */
|
|
+ if (!(obj instanceof SWMRLong2ObjectHashTable<?> other)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (this.size() != other.size()) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ final Object otherValue = other.get(curr.key);
+ if (otherValue == null || (value != otherValue && !value.equals(otherValue))) {
+ return false;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public int hashCode() {
|
|
+ /* Make no attempt to deal with concurrent modifications */
|
|
+ int hash = 0;
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ hash += curr.hashCode();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return hash;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ final StringBuilder builder = new StringBuilder(64);
|
|
+ builder.append("SingleWriterMultiReaderHashMap:{");
|
|
+
|
|
+ this.forEach((final long key, final V value) -> {
|
|
+ builder.append("{key: \"").append(key).append("\", value: \"").append(value).append("\"}");
|
|
+ });
|
|
+
|
|
+ return builder.append('}').toString();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ @Override
|
|
+ public SWMRLong2ObjectHashTable<V> clone() {
|
|
+ return new SWMRLong2ObjectHashTable<>(this.getTableAcquire().length, this.loadFactor, this);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public void forEach(final Consumer<? super TableEntry<V>> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ action.accept(curr);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public void forEach(final BiLongObjectConsumer<? super V> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ action.accept(curr.key, value);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Provides the specified consumer with all keys contained within this map.
|
|
+ * @param action The specified consumer.
|
|
+ */
|
|
+ public void forEachKey(final LongConsumer action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ action.accept(curr.key);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Provides the specified consumer with all values contained within this map. Equivalent to {@code map.values().forEach(Consumer)}.
|
|
+ * @param action The specified consumer.
|
|
+ */
|
|
+ public void forEachValue(final Consumer<? super V> action) {
|
|
+ Validate.notNull(action, "Null action");
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTableAcquire();
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> curr = getAtIndexOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
|
|
+ final V value = curr.getValueAcquire();
|
|
+
|
|
+ action.accept(value);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public V get(final long key) {
|
|
+ final TableEntry<V> entry = this.getEntryForOpaque(key);
|
|
+ return entry == null ? null : entry.getValueAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public boolean containsKey(final long key) {
|
|
+ // note: we need to use getValueAcquire, so that the reads from this map are ordered by acquire semantics
|
|
+ return this.get(key) != null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public V getOrDefault(final long key, final V defaultValue) {
|
|
+ final TableEntry<V> entry = this.getEntryForOpaque(key);
|
|
+
|
|
+ return entry == null ? defaultValue : entry.getValueAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public int size() {
|
|
+ return this.getSizeAcquire();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public boolean isEmpty() {
|
|
+ return this.getSizeAcquire() == 0;
|
|
+ }
|
|
+
|
|
+ /* Non-MT-Safe */
|
|
+
|
|
+ protected int threshold;
|
|
+
|
|
+ protected final void checkResize(final int minCapacity) {
|
|
+ if (minCapacity <= this.threshold || this.threshold < 0) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final TableEntry<V>[] table = this.getTablePlain();
|
|
+ int newCapacity = minCapacity >= MAXIMUM_CAPACITY ? MAXIMUM_CAPACITY : IntegerUtil.roundCeilLog2(minCapacity);
|
|
+ if (newCapacity < 0) {
|
|
+ newCapacity = MAXIMUM_CAPACITY;
|
|
+ }
|
|
+ if (newCapacity <= table.length) {
|
|
+ if (newCapacity == MAXIMUM_CAPACITY) {
|
|
+ return;
|
|
+ }
|
|
+ newCapacity = table.length << 1;
|
|
+ }
|
|
+
|
|
+ //noinspection unchecked
|
|
+ final TableEntry<V>[] newTable = new TableEntry[newCapacity];
|
|
+ final int indexMask = newCapacity - 1;
|
|
+
|
|
+ for (int i = 0, len = table.length; i < len; ++i) {
|
|
+ for (TableEntry<V> entry = table[i]; entry != null; entry = entry.getNextPlain()) {
|
|
+ final long key = entry.key;
|
|
+ final int hash = SWMRLong2ObjectHashTable.getHash(key);
|
|
+ final int index = hash & indexMask;
|
|
+
|
|
+ /* we need to create a new entry since there could be reading threads */
|
|
+ final TableEntry<V> insert = new TableEntry<>(key, entry.getValuePlain());
|
|
+
|
|
+ final TableEntry<V> prev = newTable[index];
|
|
+
|
|
+ newTable[index] = insert;
|
|
+ insert.setNextPlain(prev);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (newCapacity == MAXIMUM_CAPACITY) {
|
|
+ this.threshold = -1; /* No more resizing */
|
|
+ } else {
|
|
+ this.threshold = getTargetCapacity(newCapacity, this.loadFactor);
|
|
+ }
|
|
+ this.setTableRelease(newTable); /* use release to publish entries in table */
|
|
+ }
|
|
+
|
|
+ protected final int addToSize(final int num) {
|
|
+ final int newSize = this.getSizePlain() + num;
|
|
+
|
|
+ this.setSizeOpaque(newSize);
|
|
+ this.checkResize(newSize);
|
|
+
|
|
+ return newSize;
|
|
+ }
|
|
+
|
|
+ protected final int removeFromSize(final int num) {
|
|
+ final int newSize = this.getSizePlain() - num;
|
|
+
|
|
+ this.setSizeOpaque(newSize);
|
|
+
|
|
+ return newSize;
|
|
+ }
|
|
+
|
|
+ protected final V put(final long key, final V value, final boolean onlyIfAbsent) {
|
|
+ final TableEntry<V>[] table = this.getTablePlain();
|
|
+ final int hash = SWMRLong2ObjectHashTable.getHash(key);
|
|
+ final int index = hash & (table.length - 1);
|
|
+
|
|
+ final TableEntry<V> head = table[index];
|
|
+ if (head == null) {
|
|
+ final TableEntry<V> insert = new TableEntry<>(key, value);
|
|
+ setAtIndexRelease(table, index, insert);
|
|
+ this.addToSize(1);
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ for (TableEntry<V> curr = head;;) {
|
|
+ if (key == curr.key) {
|
|
+ if (onlyIfAbsent) {
|
|
+ return curr.getValuePlain();
|
|
+ }
|
|
+
|
|
+ final V currVal = curr.getValuePlain();
|
|
+ curr.setValueRelease(value);
|
|
+ return currVal;
|
|
+ }
|
|
+
|
|
+ final TableEntry<V> next = curr.getNextPlain();
|
|
+ if (next != null) {
|
|
+ curr = next;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final TableEntry<V> insert = new TableEntry<>(key, value);
|
|
+
|
|
+ curr.setNextRelease(insert);
|
|
+ this.addToSize(1);
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public V put(final long key, final V value) {
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ return this.put(key, value, false);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public V putIfAbsent(final long key, final V value) {
|
|
+ Validate.notNull(value, "Null value");
|
|
+
|
|
+ return this.put(key, value, true);
|
|
+ }
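+
+ // Usage sketch of the single-writer/multi-reader contract, assuming a SWMRLong2ObjectHashTable<String>
+ // named "byId": the writer thread calls byId.put(10L, "chunk"), while reader threads may concurrently
+ // call byId.get(10L), which returns either null or a fully published value.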
+
|
|
+ protected final V remove(final long key, final int hash) {
|
|
+ final TableEntry<V>[] table = this.getTablePlain();
|
|
+ final int index = (table.length - 1) & hash;
|
|
+
|
|
+ final TableEntry<V> head = table[index];
|
|
+ if (head == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (head.key == key) {
|
|
+ setAtIndexRelease(table, index, head.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return head.getValuePlain();
|
|
+ }
|
|
+
|
|
+ for (TableEntry<V> curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (key == curr.key) {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return curr.getValuePlain();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ protected final V remove(final long key, final int hash, final V expect) {
|
|
+ final TableEntry<V>[] table = this.getTablePlain();
|
|
+ final int index = (table.length - 1) & hash;
|
|
+
|
|
+ final TableEntry<V> head = table[index];
|
|
+ if (head == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ if (head.key == key) {
|
|
+ final V val = head.value;
|
|
+ if (val == expect || val.equals(expect)) {
|
|
+ setAtIndexRelease(table, index, head.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return head.getValuePlain();
|
|
+ } else {
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (TableEntry<V> curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
|
|
+ if (key == curr.key) {
|
|
+ final V val = curr.value;
|
|
+ if (val == expect || val.equals(expect)) {
|
|
+ prev.setNextRelease(curr.getNextPlain());
|
|
+ this.removeFromSize(1);
|
|
+
|
|
+ return curr.getValuePlain();
|
|
+ } else {
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public V remove(final long key) {
|
|
+ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key));
|
|
+ }
|
|
+
|
|
+ public boolean remove(final long key, final V expect) {
|
|
+ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key), expect) != null;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * {@inheritDoc}
|
|
+ */
|
|
+ public void putAll(final SWMRLong2ObjectHashTable<? extends V> map) {
|
|
+ Validate.notNull(map, "Null map");
|
|
+
|
|
+ final int size = map.size();
|
|
+ this.checkResize(Math.max(this.getSizePlain() + size/2, size)); /* preemptively resize */
|
|
+ map.forEach(this::put);
|
|
+ }
|
|
+
|
|
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This call is non-atomic and the order in which entries are removed is undefined. The clear operation itself
+ * is release ordered; that is, after the clear operation is performed, a release fence is performed.
+ * </p>
+ */
+ public void clear() {
+ Arrays.fill(this.getTablePlain(), null);
+ this.setSizeRelease(0);
+ }
|
|
+
|
|
+ public static final class TableEntry<V> {
|
|
+
|
|
+ protected static final VarHandle TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
|
|
+
|
|
+ protected final long key;
|
|
+ protected V value;
|
|
+
|
|
+ protected TableEntry<V> next;
|
|
+
|
|
+ protected static final VarHandle VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", Object.class);
|
|
+ protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
|
|
+
|
|
+ /* value */
|
|
+
|
|
+ protected final V getValuePlain() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final V getValueAcquire() {
|
|
+ //noinspection unchecked
|
|
+ return (V)VALUE_HANDLE.getAcquire(this);
|
|
+ }
|
|
+
|
|
+ protected final void setValueRelease(final V to) {
|
|
+ VALUE_HANDLE.setRelease(this, to);
|
|
+ }
|
|
+
|
|
+ /* next */
|
|
+
|
|
+ protected final TableEntry<V> getNextPlain() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)NEXT_HANDLE.get(this);
|
|
+ }
|
|
+
|
|
+ protected final TableEntry<V> getNextOpaque() {
|
|
+ //noinspection unchecked
|
|
+ return (TableEntry<V>)NEXT_HANDLE.getOpaque(this);
|
|
+ }
|
|
+
|
|
+ protected final void setNextPlain(final TableEntry<V> next) {
|
|
+ NEXT_HANDLE.set(this, next);
|
|
+ }
|
|
+
|
|
+ protected final void setNextRelease(final TableEntry<V> next) {
|
|
+ NEXT_HANDLE.setRelease(this, next);
|
|
+ }
|
|
+
|
|
+ protected TableEntry(final long key, final V value) {
|
|
+ this.key = key;
|
|
+ this.value = value;
|
|
+ }
|
|
+
|
|
+ public long getKey() {
|
|
+ return this.key;
|
|
+ }
|
|
+
|
|
+ public V getValue() {
|
|
+ return this.getValueAcquire();
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..85e6ef636d435a0ee4bf3e0760b0c87422c520a1
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
|
|
@@ -0,0 +1,564 @@
|
|
+package ca.spottedleaf.concurrentutil.scheduler;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.set.LinkedSortedSet;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import ca.spottedleaf.concurrentutil.util.TimeUtil;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.BitSet;
|
|
+import java.util.Comparator;
|
|
+import java.util.PriorityQueue;
|
|
+import java.util.concurrent.ThreadFactory;
|
|
+import java.util.concurrent.atomic.AtomicInteger;
|
|
+import java.util.concurrent.atomic.AtomicLong;
|
|
+import java.util.concurrent.locks.LockSupport;
|
|
+import java.util.function.BooleanSupplier;
|
|
+
|
|
+/**
|
|
+ * @deprecated To be replaced
|
|
+ */
|
|
+@Deprecated
|
|
+public class SchedulerThreadPool {
|
|
+
|
|
+ public static final long DEADLINE_NOT_SET = Long.MIN_VALUE;
|
|
+
|
|
+ private static final Comparator<SchedulableTick> TICK_COMPARATOR_BY_TIME = (final SchedulableTick t1, final SchedulableTick t2) -> {
|
|
+ final int timeCompare = TimeUtil.compareTimes(t1.scheduledStart, t2.scheduledStart);
|
|
+ if (timeCompare != 0) {
|
|
+ return timeCompare;
|
|
+ }
|
|
+
|
|
+ return Long.compare(t1.id, t2.id);
|
|
+ };
|
|
+
|
|
+ private final TickThreadRunner[] runners;
|
|
+ private final Thread[] threads;
|
|
+ private final LinkedSortedSet<SchedulableTick> awaiting = new LinkedSortedSet<>(TICK_COMPARATOR_BY_TIME);
|
|
+ private final PriorityQueue<SchedulableTick> queued = new PriorityQueue<>(TICK_COMPARATOR_BY_TIME);
|
|
+ private final BitSet idleThreads;
|
|
+
|
|
+ private final Object scheduleLock = new Object();
|
|
+
|
|
+ private volatile boolean halted;
|
|
+
|
|
+ /**
|
|
+ * Creates, but does not start, a scheduler thread pool with the specified number of threads
|
|
+ * created using the specified thread factory.
|
|
+ * @param threads Specified number of threads
|
|
+ * @param threadFactory Specified thread factory
|
|
+ * @see #start()
|
|
+ */
|
|
+ public SchedulerThreadPool(final int threads, final ThreadFactory threadFactory) {
|
|
+ final BitSet idleThreads = new BitSet(threads);
|
|
+ for (int i = 0; i < threads; ++i) {
|
|
+ idleThreads.set(i);
|
|
+ }
|
|
+ this.idleThreads = idleThreads;
|
|
+
|
|
+ final TickThreadRunner[] runners = new TickThreadRunner[threads];
|
|
+ final Thread[] t = new Thread[threads];
|
|
+ for (int i = 0; i < threads; ++i) {
|
|
+ runners[i] = new TickThreadRunner(i, this);
|
|
+ t[i] = threadFactory.newThread(runners[i]);
|
|
+ }
|
|
+
|
|
+ this.threads = t;
|
|
+ this.runners = runners;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Starts the threads in this pool.
|
|
+ */
|
|
+ public void start() {
|
|
+ for (final Thread thread : this.threads) {
|
|
+ thread.start();
|
|
+ }
|
|
+ }
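+
+ // Illustrative construction sketch (thread count and thread name are assumptions):
+ //   final SchedulerThreadPool pool = new SchedulerThreadPool(4, (runnable) -> new Thread(runnable, "tick-scheduler"));
+ //   pool.start();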
+
|
|
+ /**
|
|
+ * Attempts to prevent further execution of tasks, optionally waiting for the scheduler threads to die.
|
|
+ *
|
|
+ * @param sync Whether to wait for the scheduler threads to die.
|
|
+ * @param maxWaitNS The maximum time, in ns, to wait for the scheduler threads to die.
|
|
+ * @return {@code true} if sync was false, or if sync was true and the scheduler threads died before the timeout.
|
|
+ * Otherwise, returns {@code false} if the time elapsed exceeded the maximum wait time.
|
|
+ */
|
|
+ public boolean halt(final boolean sync, final long maxWaitNS) {
|
|
+ this.halted = true;
|
|
+ for (final Thread thread : this.threads) {
|
|
+ // force response to halt
|
|
+ LockSupport.unpark(thread);
|
|
+ }
|
|
+ final long time = System.nanoTime();
|
|
+ if (sync) {
|
|
+ // start at 10 * 0.5ms -> 5ms
|
|
+ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
|
|
+ boolean allDead = true;
|
|
+ for (final Thread thread : this.threads) {
|
|
+ if (thread.isAlive()) {
|
|
+ allDead = false;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (allDead) {
|
|
+ return true;
|
|
+ }
|
|
+ if ((System.nanoTime() - time) >= maxWaitNS) {
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
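+
+ // e.g. waiting up to five seconds for the scheduler threads to die (the timeout value is an assumption):
+ //   final boolean stopped = pool.halt(true, java.util.concurrent.TimeUnit.SECONDS.toNanos(5L));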
+
|
|
+ /**
|
|
+ * Returns an array of the underlying scheduling threads.
|
|
+ */
|
|
+ public Thread[] getThreads() {
|
|
+ return this.threads.clone();
|
|
+ }
|
|
+
|
|
+ private void insertFresh(final SchedulableTick task) {
|
|
+ final TickThreadRunner[] runners = this.runners;
|
|
+
|
|
+ final int firstIdleThread = this.idleThreads.nextSetBit(0);
|
|
+
|
|
+ if (firstIdleThread != -1) {
|
|
+ // push to idle thread
|
|
+ this.idleThreads.clear(firstIdleThread);
|
|
+ final TickThreadRunner runner = runners[firstIdleThread];
|
|
+ task.awaitingLink = this.awaiting.addLast(task);
|
|
+ runner.acceptTask(task);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ // try to replace the last awaiting task
|
|
+ final SchedulableTick last = this.awaiting.last();
|
|
+
|
|
+ if (last != null && TICK_COMPARATOR_BY_TIME.compare(task, last) < 0) {
|
|
+ // need to replace the last task
|
|
+ this.awaiting.pollLast();
|
|
+ last.awaitingLink = null;
|
|
+ task.awaitingLink = this.awaiting.addLast(task);
|
|
+ // need to add task to queue to be picked up later
|
|
+ this.queued.add(last);
|
|
+
|
|
+ final TickThreadRunner runner = last.ownedBy;
|
|
+ runner.replaceTask(task);
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ // add to queue, will be picked up later
|
|
+ this.queued.add(task);
|
|
+ }
|
|
+
|
|
+ private void takeTask(final TickThreadRunner runner, final SchedulableTick tick) {
|
|
+ if (!this.awaiting.remove(tick.awaitingLink)) {
|
|
+ throw new IllegalStateException("Task is not in awaiting");
|
|
+ }
|
|
+ tick.awaitingLink = null;
|
|
+ }
|
|
+
|
|
+ private SchedulableTick returnTask(final TickThreadRunner runner, final SchedulableTick reschedule) {
|
|
+ if (reschedule != null) {
|
|
+ this.queued.add(reschedule);
|
|
+ }
|
|
+ final SchedulableTick ret = this.queued.poll();
|
|
+ if (ret == null) {
|
|
+ this.idleThreads.set(runner.id);
|
|
+ } else {
|
|
+ ret.awaitingLink = this.awaiting.addLast(ret);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Schedules the specified task to be executed on this thread pool.
|
|
+ * @param task Specified task
|
|
+ * @throws IllegalStateException If the task is already scheduled
|
|
+ * @see SchedulableTick
|
|
+ */
|
|
+ public void schedule(final SchedulableTick task) {
|
|
+ synchronized (this.scheduleLock) {
|
|
+ if (!task.tryMarkScheduled()) {
|
|
+ throw new IllegalStateException("Task " + task + " is already scheduled or cancelled");
|
|
+ }
|
|
+
|
|
+ task.schedulerOwnedBy = this;
|
|
+
|
|
+ this.insertFresh(task);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Updates the task's scheduled start to the maximum of its current scheduled start and the specified
+ * new start. If the task is not scheduled, returns {@code false}. Otherwise, returns whether the
+ * scheduled start was updated. Behavior is undefined if the specified task is scheduled in another executor.
+ * @param task Specified task
|
|
+ * @param newStart Specified new start
|
|
+ */
|
|
+ public boolean updateTickStartToMax(final SchedulableTick task, final long newStart) {
|
|
+ synchronized (this.scheduleLock) {
|
|
+ if (TimeUtil.compareTimes(newStart, task.getScheduledStart()) <= 0) {
|
|
+ return false;
|
|
+ }
|
|
+ if (this.queued.remove(task)) {
|
|
+ task.setScheduledStart(newStart);
|
|
+ this.queued.add(task);
|
|
+ return true;
|
|
+ }
|
|
+ if (task.awaitingLink != null) {
|
|
+ this.awaiting.remove(task.awaitingLink);
|
|
+ task.awaitingLink = null;
|
|
+
|
|
+ // re-queue task
|
|
+ task.setScheduledStart(newStart);
|
|
+ this.queued.add(task);
|
|
+
|
|
+ // now we need to replace the task the runner was waiting for
|
|
+ final TickThreadRunner runner = task.ownedBy;
|
|
+ final SchedulableTick replace = this.queued.poll();
|
|
+
|
|
+ // replace cannot be null, since we have added a task to queued
|
|
+ if (replace != task) {
|
|
+ runner.replaceTask(replace);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
+ * Returns {@code null} if the task is not scheduled, returns {@code TRUE} if the task was cancelled
+ * and was queued to execute, returns {@code FALSE} if the task was cancelled but was executing.
+ */
|
|
+ public Boolean tryRetire(final SchedulableTick task) {
|
|
+ if (task.schedulerOwnedBy != this) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ synchronized (this.scheduleLock) {
|
|
+ if (this.queued.remove(task)) {
|
|
+ // cancelled, and no runner owns it - so return
|
|
+ return Boolean.TRUE;
|
|
+ }
|
|
+ if (task.awaitingLink != null) {
|
|
+ this.awaiting.remove(task.awaitingLink);
|
|
+ task.awaitingLink = null;
|
|
+ // here we need to replace the task the runner was waiting for
|
|
+ final TickThreadRunner runner = task.ownedBy;
|
|
+ final SchedulableTick replace = this.queued.poll();
|
|
+
|
|
+ if (replace == null) {
|
|
+ // nothing to replace with, set to idle
|
|
+ this.idleThreads.set(runner.id);
|
|
+ runner.forceIdle();
|
|
+ } else {
|
|
+ runner.replaceTask(replace);
|
|
+ }
|
|
+
|
|
+ return Boolean.TRUE;
|
|
+ }
|
|
+
|
|
+ // could not find it in queue
|
|
+ return task.tryMarkCancelled() ? Boolean.FALSE : null;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
+ * Indicates that intermediate tasks are available to be executed by the task.
+ * <p>
+ * Note: currently a no-op
+ * </p>
+ * @param task The specified task
+ * @see SchedulableTick
+ */
|
|
+ public void notifyTasks(final SchedulableTick task) {
|
|
+ // Not implemented
|
|
+ }
|
|
+
|
|
+ /**
+ * Represents a tickable task that can be scheduled into a {@link SchedulerThreadPool}.
+ * <p>
+ * A tickable task is expected to run on a fixed interval, which is determined by
+ * the {@link SchedulerThreadPool}.
+ * </p>
+ * <p>
+ * A tickable task can have intermediate tasks that can be executed before its tick method is run. Instead of
+ * the {@link SchedulerThreadPool} parking in-between ticks, the scheduler will instead drain
+ * intermediate tasks from scheduled tasks. The processing of intermediate tasks allows the scheduler to take
+ * advantage of downtime to reduce the intermediate task load from tasks once they begin ticking.
+ * </p>
+ * <p>
+ * It is guaranteed that {@link #runTick()} and {@link #runTasks(BooleanSupplier)} are never
+ * invoked in parallel.
+ * It is required that when intermediate tasks are scheduled, {@link SchedulerThreadPool#notifyTasks(SchedulableTick)}
+ * is invoked for any scheduled task - otherwise, {@link #runTasks(BooleanSupplier)} may not be invoked to
+ * process intermediate tasks.
+ * </p>
+ * @deprecated To be replaced
+ */
|
|
+ @Deprecated
|
|
+ public static abstract class SchedulableTick {
|
|
+ private static final AtomicLong ID_GENERATOR = new AtomicLong();
|
|
+ public final long id = ID_GENERATOR.getAndIncrement();
|
|
+
|
|
+ private static final int SCHEDULE_STATE_NOT_SCHEDULED = 0;
|
|
+ private static final int SCHEDULE_STATE_SCHEDULED = 1;
|
|
+ private static final int SCHEDULE_STATE_CANCELLED = 2;
|
|
+
|
|
+ private final AtomicInteger scheduled = new AtomicInteger();
|
|
+ private SchedulerThreadPool schedulerOwnedBy;
|
|
+ private long scheduledStart = DEADLINE_NOT_SET;
|
|
+ private TickThreadRunner ownedBy;
|
|
+
|
|
+ private LinkedSortedSet.Link<SchedulableTick> awaitingLink;
|
|
+
|
|
+ private boolean tryMarkScheduled() {
|
|
+ return this.scheduled.compareAndSet(SCHEDULE_STATE_NOT_SCHEDULED, SCHEDULE_STATE_SCHEDULED);
|
|
+ }
|
|
+
|
|
+ private boolean tryMarkCancelled() {
|
|
+ return this.scheduled.compareAndSet(SCHEDULE_STATE_SCHEDULED, SCHEDULE_STATE_CANCELLED);
|
|
+ }
|
|
+
|
|
+ private boolean isScheduled() {
|
|
+ return this.scheduled.get() == SCHEDULE_STATE_SCHEDULED;
|
|
+ }
|
|
+
|
|
+ protected final long getScheduledStart() {
|
|
+ return this.scheduledStart;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * If this task is scheduled, then this may only be invoked during {@link #runTick()},
|
|
+ * and {@link #runTasks(BooleanSupplier)}
|
|
+ */
|
|
+ protected final void setScheduledStart(final long value) {
|
|
+ this.scheduledStart = value;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Executes the tick.
|
|
+ * <p>
|
|
+ * It is the callee's responsibility to invoke {@link #setScheduledStart(long)} to adjust the start of
|
|
+ * the next tick.
|
|
+ * </p>
|
|
+ * @return {@code true} if the task should continue to be scheduled, {@code false} otherwise.
|
|
+ */
|
|
+ public abstract boolean runTick();
|
|
+
|
|
+ /**
|
|
+ * Returns whether this task has any intermediate tasks that can be executed.
|
|
+ */
|
|
+ public abstract boolean hasTasks();
|
|
+
|
|
+ /**
|
|
+ * Returns {@code null} if this task should not be scheduled, otherwise returns
|
|
+ * {@code Boolean.TRUE} if there are more intermediate tasks to execute and
|
|
+ * {@code Boolean.FALSE} if there are no more intermediate tasks to execute.
|
|
+ */
|
|
+ public abstract Boolean runTasks(final BooleanSupplier canContinue);
|
|
+
|
|
+ @Override
|
|
+ public String toString() {
|
|
+ return "SchedulableTick:{" +
|
|
+ "class=" + this.getClass().getName() + "," +
|
|
+ "scheduled_state=" + this.scheduled.get() + ","
|
|
+ + "}";
|
|
+ }
|
|
+ }
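+
+ /*
+ * Illustrative sketch (not part of the upstream patch): a minimal SchedulableTick subclass and how it
+ * might be scheduled. The class name ExampleTick, the 50ms interval and the pool variable are assumptions
+ * made for this example only.
+ *
+ * final class ExampleTick extends SchedulableTick {
+ * ExampleTick() {
+ * this.setScheduledStart(System.nanoTime()); // first tick as soon as possible
+ * }
+ *
+ * @Override
+ * public boolean runTick() {
+ * // do one tick of work, then ask to be ticked again 50ms after this tick's scheduled start
+ * this.setScheduledStart(this.getScheduledStart() + 50_000_000L);
+ * return true; // keep the task scheduled
+ * }
+ *
+ * @Override
+ * public boolean hasTasks() {
+ * return false; // no intermediate tasks in this example
+ * }
+ *
+ * @Override
+ * public Boolean runTasks(final BooleanSupplier canContinue) {
+ * return Boolean.FALSE; // nothing to drain
+ * }
+ * }
+ *
+ * // usage, assuming a constructed SchedulerThreadPool named pool:
+ * // pool.schedule(new ExampleTick());
+ */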
|
|
+
|
|
+ private static final class TickThreadRunner implements Runnable {
|
|
+
|
|
+ /**
|
|
+ * There are no tasks in this thread's runqueue, so it is parked.
|
|
+ * <p>
|
|
+ * stateTarget = null
|
|
+ * </p>
|
|
+ */
|
|
+ private static final int STATE_IDLE = 0;
|
|
+
|
|
+ /**
|
|
+ * The runner is waiting to tick a task, as it has no intermediate tasks to execute.
|
|
+ * <p>
|
|
+ * stateTarget = the task awaiting tick
|
|
+ * </p>
|
|
+ */
|
|
+ private static final int STATE_AWAITING_TICK = 1;
|
|
+
|
|
+ /**
|
|
+ * The runner is executing a tick for one of the tasks that was in its runqueue.
|
|
+ * <p>
|
|
+ * stateTarget = the task being ticked
|
|
+ * </p>
|
|
+ */
|
|
+ private static final int STATE_EXECUTING_TICK = 2;
|
|
+
|
|
+ public final int id;
|
|
+ public final SchedulerThreadPool scheduler;
|
|
+
|
|
+ private volatile Thread thread;
|
|
+ private volatile TickThreadRunnerState state = new TickThreadRunnerState(null, STATE_IDLE);
|
|
+ private static final VarHandle STATE_HANDLE = ConcurrentUtil.getVarHandle(TickThreadRunner.class, "state", TickThreadRunnerState.class);
|
|
+
|
|
+ private void setStatePlain(final TickThreadRunnerState state) {
|
|
+ STATE_HANDLE.set(this, state);
|
|
+ }
|
|
+
|
|
+ private void setStateOpaque(final TickThreadRunnerState state) {
|
|
+ STATE_HANDLE.setOpaque(this, state);
|
|
+ }
|
|
+
|
|
+ private void setStateVolatile(final TickThreadRunnerState state) {
|
|
+ STATE_HANDLE.setVolatile(this, state);
|
|
+ }
|
|
+
|
|
+ private static record TickThreadRunnerState(SchedulableTick stateTarget, int state) {}
|
|
+
|
|
+ public TickThreadRunner(final int id, final SchedulerThreadPool scheduler) {
|
|
+ this.id = id;
|
|
+ this.scheduler = scheduler;
|
|
+ }
|
|
+
|
|
+ private Thread getRunnerThread() {
|
|
+ return this.thread;
|
|
+ }
|
|
+
|
|
+ private void acceptTask(final SchedulableTick task) {
|
|
+ if (task.ownedBy != null) {
|
|
+ throw new IllegalStateException("Already owned by another runner");
|
|
+ }
|
|
+ task.ownedBy = this;
|
|
+ final TickThreadRunnerState state = this.state;
|
|
+ if (state.state != STATE_IDLE) {
|
|
+ throw new IllegalStateException("Cannot accept task in state " + state);
|
|
+ }
|
|
+ this.setStateVolatile(new TickThreadRunnerState(task, STATE_AWAITING_TICK));
|
|
+ LockSupport.unpark(this.getRunnerThread());
|
|
+ }
|
|
+
|
|
+ private void replaceTask(final SchedulableTick task) {
|
|
+ final TickThreadRunnerState state = this.state;
|
|
+ if (state.state != STATE_AWAITING_TICK) {
|
|
+ throw new IllegalStateException("Cannot replace task in state " + state);
|
|
+ }
|
|
+ if (task.ownedBy != null) {
|
|
+ throw new IllegalStateException("Already owned by another runner");
|
|
+ }
|
|
+ task.ownedBy = this;
|
|
+
|
|
+ state.stateTarget.ownedBy = null;
|
|
+
|
|
+ this.setStateVolatile(new TickThreadRunnerState(task, STATE_AWAITING_TICK));
|
|
+ LockSupport.unpark(this.getRunnerThread());
|
|
+ }
|
|
+
|
|
+ private void forceIdle() {
|
|
+ final TickThreadRunnerState state = this.state;
|
|
+ if (state.state != STATE_AWAITING_TICK) {
|
|
+ throw new IllegalStateException("Cannot replace task in state " + state);
|
|
+ }
|
|
+ state.stateTarget.ownedBy = null;
|
|
+ this.setStateOpaque(new TickThreadRunnerState(null, STATE_IDLE));
|
|
+ // no need to unpark
|
|
+ }
|
|
+
|
|
+ private boolean takeTask(final TickThreadRunnerState state, final SchedulableTick task) {
|
|
+ synchronized (this.scheduler.scheduleLock) {
|
|
+ if (this.state != state) {
|
|
+ return false;
|
|
+ }
|
|
+ this.setStatePlain(new TickThreadRunnerState(task, STATE_EXECUTING_TICK));
|
|
+ this.scheduler.takeTask(this, task);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void returnTask(final SchedulableTick task, final boolean reschedule) {
|
|
+ synchronized (this.scheduler.scheduleLock) {
|
|
+ task.ownedBy = null;
|
|
+
|
|
+ final SchedulableTick newWait = this.scheduler.returnTask(this, reschedule && task.isScheduled() ? task : null);
|
|
+ if (newWait == null) {
|
|
+ this.setStatePlain(new TickThreadRunnerState(null, STATE_IDLE));
|
|
+ } else {
|
|
+ if (newWait.ownedBy != null) {
|
|
+ throw new IllegalStateException("Already owned by another runner");
|
|
+ }
|
|
+ newWait.ownedBy = this;
|
|
+ this.setStatePlain(new TickThreadRunnerState(newWait, STATE_AWAITING_TICK));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void run() {
|
|
+ this.thread = Thread.currentThread();
|
|
+
|
|
+ main_state_loop:
|
|
+ for (;;) {
|
|
+ final TickThreadRunnerState startState = this.state;
|
|
+ final int startStateType = startState.state;
|
|
+ final SchedulableTick startStateTask = startState.stateTarget;
|
|
+
|
|
+ if (this.scheduler.halted) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ switch (startStateType) {
|
|
+ case STATE_IDLE: {
|
|
+ while (this.state.state == STATE_IDLE) {
|
|
+ LockSupport.park();
|
|
+ if (this.scheduler.halted) {
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ continue main_state_loop;
|
|
+ }
|
|
+
|
|
+ case STATE_AWAITING_TICK: {
|
|
+ final long deadline = startStateTask.getScheduledStart();
|
|
+ for (;;) {
|
|
+ if (this.state != startState) {
|
|
+ continue main_state_loop;
|
|
+ }
|
|
+ final long diff = deadline - System.nanoTime();
|
|
+ if (diff <= 0L) {
|
|
+ break;
|
|
+ }
|
|
+ LockSupport.parkNanos(startState, diff);
|
|
+ if (this.scheduler.halted) {
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!this.takeTask(startState, startStateTask)) {
|
|
+ continue main_state_loop;
|
|
+ }
|
|
+
|
|
+ // TODO exception handling
|
|
+ final boolean reschedule = startStateTask.runTick();
|
|
+
|
|
+ this.returnTask(startStateTask, reschedule);
|
|
+
|
|
+ continue main_state_loop;
|
|
+ }
|
|
+
|
|
+ case STATE_EXECUTING_TICK: {
|
|
+ throw new IllegalStateException("Tick execution must be set by runner thread, not by any other thread");
|
|
+ }
|
|
+
|
|
+ default: {
|
|
+ throw new IllegalStateException("Unknown state: " + startState);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedSortedSet.java b/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedSortedSet.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..82c4c11b0b564c97ac92bd5f54e3754a7ba95184
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedSortedSet.java
|
|
@@ -0,0 +1,270 @@
|
|
+package ca.spottedleaf.concurrentutil.set;
|
|
+
|
|
+import java.util.Comparator;
|
|
+import java.util.Iterator;
|
|
+import java.util.NoSuchElementException;
|
|
+
|
|
+public final class LinkedSortedSet<E> implements Iterable<E> {
|
|
+
|
|
+ public final Comparator<? super E> comparator;
|
|
+
|
|
+ private Link<E> head;
|
|
+ private Link<E> tail;
|
|
+
|
|
+ public LinkedSortedSet() {
|
|
+ this((Comparator)Comparator.naturalOrder());
|
|
+ }
|
|
+
|
|
+ public LinkedSortedSet(final Comparator<? super E> comparator) {
|
|
+ this.comparator = comparator;
|
|
+ }
|
|
+
|
|
+ public void clear() {
|
|
+ this.head = this.tail = null;
|
|
+ }
|
|
+
|
|
+ public boolean isEmpty() {
|
|
+ return this.head == null;
|
|
+ }
|
|
+
|
|
+ public E first() {
|
|
+ final Link<E> head = this.head;
|
|
+ return head == null ? null : head.element;
|
|
+ }
|
|
+
|
|
+ public E last() {
|
|
+ final Link<E> tail = this.tail;
|
|
+ return tail == null ? null : tail.element;
|
|
+ }
|
|
+
|
|
+ public boolean containsFirst(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+ for (Link<E> curr = this.head; curr != null; curr = curr.next) {
|
|
+ if (comparator.compare(element, curr.element) == 0) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public boolean containsLast(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+ for (Link<E> curr = this.tail; curr != null; curr = curr.prev) {
|
|
+ if (comparator.compare(element, curr.element) == 0) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ private void removeNode(final Link<E> node) {
|
|
+ final Link<E> prev = node.prev;
|
|
+ final Link<E> next = node.next;
|
|
+
|
|
+ // help GC
|
|
+ node.element = null;
|
|
+ node.prev = null;
|
|
+ node.next = null;
|
|
+
|
|
+ if (prev == null) {
|
|
+ this.head = next;
|
|
+ } else {
|
|
+ prev.next = next;
|
|
+ }
|
|
+
|
|
+ if (next == null) {
|
|
+ this.tail = prev;
|
|
+ } else {
|
|
+ next.prev = prev;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public boolean remove(final Link<E> link) {
|
|
+ if (link.element == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.removeNode(link);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ public boolean removeFirst(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+ for (Link<E> curr = this.head; curr != null; curr = curr.next) {
|
|
+ if (comparator.compare(element, curr.element) == 0) {
|
|
+ this.removeNode(curr);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public boolean removeLast(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+ for (Link<E> curr = this.tail; curr != null; curr = curr.prev) {
|
|
+ if (comparator.compare(element, curr.element) == 0) {
|
|
+ this.removeNode(curr);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Iterator<E> iterator() {
|
|
+ return new Iterator<>() {
|
|
+ private Link<E> next = LinkedSortedSet.this.head;
|
|
+
|
|
+ @Override
|
|
+ public boolean hasNext() {
|
|
+ return this.next != null;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public E next() {
|
|
+ final Link<E> next = this.next;
|
|
+ if (next == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+ this.next = next.next;
|
|
+ return next.element;
|
|
+ }
|
|
+ };
|
|
+ }
|
|
+
|
|
+ public E pollFirst() {
|
|
+ final Link<E> head = this.head;
|
|
+ if (head == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final E ret = head.element;
|
|
+ final Link<E> next = head.next;
|
|
+
|
|
+ // unlink head
|
|
+ this.head = next;
|
|
+ if (next == null) {
|
|
+ this.tail = null;
|
|
+ } else {
|
|
+ next.prev = null;
|
|
+ }
|
|
+
|
|
+ // help GC
|
|
+ head.element = null;
|
|
+ head.next = null;
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public E pollLast() {
|
|
+ final Link<E> tail = this.tail;
|
|
+ if (tail == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final E ret = tail.element;
|
|
+ final Link<E> prev = tail.prev;
|
|
+
|
|
+ // unlink tail
|
|
+ this.tail = prev;
|
|
+ if (prev == null) {
|
|
+ this.head = null;
|
|
+ } else {
|
|
+ prev.next = null;
|
|
+ }
|
|
+
|
|
+ // help GC
|
|
+ tail.element = null;
|
|
+ tail.prev = null;
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Link<E> addLast(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+
|
|
+ Link<E> curr = this.tail;
|
|
+ if (curr != null) {
|
|
+ int compare;
|
|
+
|
|
+ while ((compare = comparator.compare(element, curr.element)) < 0) {
|
|
+ Link<E> prev = curr;
|
|
+ curr = curr.prev;
|
|
+ if (curr != null) {
|
|
+ continue;
|
|
+ }
|
|
+ return this.head = prev.prev = new Link<>(element, null, prev);
|
|
+ }
|
|
+
|
|
+ if (compare != 0) {
|
|
+ // insert after curr
|
|
+ final Link<E> next = curr.next;
|
|
+ final Link<E> insert = new Link<>(element, curr, next);
|
|
+ curr.next = insert;
|
|
+
|
|
+ if (next == null) {
|
|
+ this.tail = insert;
|
|
+ } else {
|
|
+ next.prev = insert;
|
|
+ }
|
|
+ return insert;
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ } else {
|
|
+ return this.head = this.tail = new Link<>(element);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Link<E> addFirst(final E element) {
|
|
+ final Comparator<? super E> comparator = this.comparator;
|
|
+
|
|
+ Link<E> curr = this.head;
|
|
+ if (curr != null) {
|
|
+ int compare;
|
|
+
|
|
+ while ((compare = comparator.compare(element, curr.element)) > 0) {
|
|
+ Link<E> prev = curr;
|
|
+ curr = curr.next;
|
|
+ if (curr != null) {
|
|
+ continue;
|
|
+ }
|
|
+ return this.tail = prev.next = new Link<>(element, prev, null);
|
|
+ }
|
|
+
|
|
+ if (compare != 0) {
|
|
+ // insert before curr
|
|
+ final Link<E> prev = curr.prev;
|
|
+ final Link<E> insert = new Link<>(element, prev, curr);
|
|
+ curr.prev = insert;
|
|
+
|
|
+ if (prev == null) {
|
|
+ this.head = insert;
|
|
+ } else {
|
|
+ prev.next = insert;
|
|
+ }
|
|
+ return insert;
|
|
+ }
|
|
+
|
|
+ return null;
|
|
+ } else {
|
|
+ return this.head = this.tail = new Link<>(element);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class Link<E> {
|
|
+ private E element;
|
|
+ private Link<E> prev;
|
|
+ private Link<E> next;
|
|
+
|
|
+ private Link(final E element) {
|
|
+ this.element = element;
|
|
+ }
|
|
+
|
|
+ private Link(final E element, final Link<E> prev, final Link<E> next) {
|
|
+ this.element = element;
|
|
+ this.prev = prev;
|
|
+ this.next = next;
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedUnsortedList.java b/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedUnsortedList.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..bd8eb4f25d1dee00fbf9c05c14b0d94c5c641a55
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/set/LinkedUnsortedList.java
|
|
@@ -0,0 +1,204 @@
|
|
+package ca.spottedleaf.concurrentutil.set;
|
|
+
|
|
+import java.util.Iterator;
|
|
+import java.util.NoSuchElementException;
|
|
+import java.util.Objects;
|
|
+
|
|
+public final class LinkedUnsortedList<E> implements Iterable<E> {
|
|
+
|
|
+ private Link<E> head;
|
|
+ private Link<E> tail;
|
|
+
|
|
+ public LinkedUnsortedList() {}
|
|
+
|
|
+ public void clear() {
|
|
+ this.head = this.tail = null;
|
|
+ }
|
|
+
|
|
+ public boolean isEmpty() {
|
|
+ return this.head == null;
|
|
+ }
|
|
+
|
|
+ public E first() {
|
|
+ final Link<E> head = this.head;
|
|
+ return head == null ? null : head.element;
|
|
+ }
|
|
+
|
|
+ public E last() {
|
|
+ final Link<E> tail = this.tail;
|
|
+ return tail == null ? null : tail.element;
|
|
+ }
|
|
+
|
|
+ public boolean containsFirst(final E element) {
|
|
+ for (Link<E> curr = this.head; curr != null; curr = curr.next) {
|
|
+ if (Objects.equals(element, curr.element)) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public boolean containsLast(final E element) {
|
|
+ for (Link<E> curr = this.tail; curr != null; curr = curr.prev) {
|
|
+ if (Objects.equals(element, curr.element)) {
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ private void removeNode(final Link<E> node) {
|
|
+ final Link<E> prev = node.prev;
|
|
+ final Link<E> next = node.next;
|
|
+
|
|
+ // help GC
|
|
+ node.element = null;
|
|
+ node.prev = null;
|
|
+ node.next = null;
|
|
+
|
|
+ if (prev == null) {
|
|
+ this.head = next;
|
|
+ } else {
|
|
+ prev.next = next;
|
|
+ }
|
|
+
|
|
+ if (next == null) {
|
|
+ this.tail = prev;
|
|
+ } else {
|
|
+ next.prev = prev;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public boolean remove(final Link<E> link) {
|
|
+ if (link.element == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ this.removeNode(link);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ public boolean removeFirst(final E element) {
|
|
+ for (Link<E> curr = this.head; curr != null; curr = curr.next) {
|
|
+ if (Objects.equals(element, curr.element)) {
|
|
+ this.removeNode(curr);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public boolean removeLast(final E element) {
|
|
+ for (Link<E> curr = this.tail; curr != null; curr = curr.prev) {
|
|
+ if (Objects.equals(element, curr.element)) {
|
|
+ this.removeNode(curr);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public Iterator<E> iterator() {
|
|
+ return new Iterator<>() {
|
|
+ private Link<E> next = LinkedUnsortedList.this.head;
|
|
+
|
|
+ @Override
|
|
+ public boolean hasNext() {
|
|
+ return this.next != null;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public E next() {
|
|
+ final Link<E> next = this.next;
|
|
+ if (next == null) {
|
|
+ throw new NoSuchElementException();
|
|
+ }
|
|
+ this.next = next.next;
|
|
+ return next.element;
|
|
+ }
|
|
+ };
|
|
+ }
|
|
+
|
|
+ public E pollFirst() {
|
|
+ final Link<E> head = this.head;
|
|
+ if (head == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final E ret = head.element;
|
|
+ final Link<E> next = head.next;
|
|
+
|
|
+ // unlink head
|
|
+ this.head = next;
|
|
+ if (next == null) {
|
|
+ this.tail = null;
|
|
+ } else {
|
|
+ next.prev = null;
|
|
+ }
|
|
+
|
|
+ // help GC
|
|
+ head.element = null;
|
|
+ head.next = null;
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public E pollLast() {
|
|
+ final Link<E> tail = this.tail;
|
|
+ if (tail == null) {
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ final E ret = tail.element;
|
|
+ final Link<E> prev = tail.prev;
|
|
+
|
|
+ // unlink tail
|
|
+ this.tail = prev;
|
|
+ if (prev == null) {
|
|
+ this.head = null;
|
|
+ } else {
|
|
+ prev.next = null;
|
|
+ }
|
|
+
|
|
+ // help GC
|
|
+ tail.element = null;
|
|
+ tail.prev = null;
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public Link<E> addLast(final E element) {
|
|
+ final Link<E> curr = this.tail;
|
|
+ if (curr != null) {
|
|
+ return this.tail = curr.next = new Link<>(element, curr, null);
|
|
+ } else {
|
|
+ return this.head = this.tail = new Link<>(element);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Link<E> addFirst(final E element) {
|
|
+ final Link<E> curr = this.head;
|
|
+ if (curr != null) {
|
|
+ return this.head = curr.prev = new Link<>(element, null, curr);
|
|
+ } else {
|
|
+ return this.head = this.tail = new Link<>(element);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class Link<E> {
|
|
+ private E element;
|
|
+ private Link<E> prev;
|
|
+ private Link<E> next;
|
|
+
|
|
+ private Link(final E element) {
|
|
+ this.element = element;
|
|
+ }
|
|
+
|
|
+ private Link(final E element, final Link<E> prev, final Link<E> next) {
|
|
+ this.element = element;
|
|
+ this.prev = prev;
|
|
+ this.next = next;
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/CollectionUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/CollectionUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..9420b9822de99d3a31224642452835b0c986f7b4
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/CollectionUtil.java
|
|
@@ -0,0 +1,31 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+import java.util.Collection;
|
|
+
|
|
+public final class CollectionUtil {
|
|
+
|
|
+ public static String toString(final Collection<?> collection, final String name) {
|
|
+ return CollectionUtil.toString(collection, name, new StringBuilder(name.length() + 128)).toString();
|
|
+ }
|
|
+
|
|
+ public static StringBuilder toString(final Collection<?> collection, final String name, final StringBuilder builder) {
|
|
+ builder.append(name).append("{elements={");
|
|
+
|
|
+ boolean first = true;
|
|
+
|
|
+ for (final Object element : collection) {
|
|
+ if (!first) {
|
|
+ builder.append(", ");
|
|
+ }
|
|
+ first = false;
|
|
+
|
|
+ builder.append('"').append(element).append('"');
|
|
+ }
|
|
+
|
|
+ return builder.append("}}");
|
|
+ }
|
|
+
|
|
+ private CollectionUtil() {
|
|
+ throw new RuntimeException();
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/ConcurrentUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/ConcurrentUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..23ae82e55696a7e2ff0e0f9609c0df6a48bb8d1d
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/ConcurrentUtil.java
|
|
@@ -0,0 +1,166 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+import java.lang.invoke.MethodHandles;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.concurrent.locks.LockSupport;
|
|
+
|
|
+public final class ConcurrentUtil {
|
|
+
|
|
+ public static String genericToString(final Object object) {
|
|
+ return object == null ? "null" : object.getClass().getName() + ":" + object.hashCode() + ":" + object.toString();
|
|
+ }
|
|
+
|
|
+ public static void rethrow(Throwable exception) {
|
|
+ rethrow0(exception);
|
|
+ }
|
|
+
|
|
+ private static <T extends Throwable> void rethrow0(Throwable thr) throws T {
|
|
+ throw (T)thr;
|
|
+ }
|
|
+
|
|
+ public static VarHandle getVarHandle(final Class<?> lookIn, final String fieldName, final Class<?> fieldType) {
|
|
+ try {
|
|
+ return MethodHandles.privateLookupIn(lookIn, MethodHandles.lookup()).findVarHandle(lookIn, fieldName, fieldType);
|
|
+ } catch (final Exception ex) {
|
|
+ throw new RuntimeException(ex); // unreachable
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static VarHandle getStaticVarHandle(final Class<?> lookIn, final String fieldName, final Class<?> fieldType) {
|
|
+ try {
|
|
+ return MethodHandles.privateLookupIn(lookIn, MethodHandles.lookup()).findStaticVarHandle(lookIn, fieldName, fieldType);
|
|
+ } catch (final Exception ex) {
|
|
+ throw new RuntimeException(ex); // unreachable
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Non-exponential backoff algorithm to use in lightly contended areas.
|
|
+ * @see ConcurrentUtil#exponentiallyBackoffSimple(long)
|
|
+ * @see ConcurrentUtil#exponentiallyBackoffComplex(long)
|
|
+ */
|
|
+ public static void backoff() {
|
|
+ Thread.onSpinWait();
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Backoff algorithm to use for a briefly held lock (i.e. a compareAndExchange operation). Generally this should not be
|
|
+ * used when a thread can block another thread. Instead, use {@link ConcurrentUtil#exponentiallyBackoffComplex(long)}.
|
|
+ * @param counter The current counter.
|
|
+ * @return The counter plus 1.
|
|
+ * @see ConcurrentUtil#backoff()
|
|
+ * @see ConcurrentUtil#exponentiallyBackoffComplex(long)
|
|
+ */
|
|
+ public static long exponentiallyBackoffSimple(final long counter) {
|
|
+ for (long i = 0; i < counter; ++i) {
|
|
+ backoff();
|
|
+ }
|
|
+ return counter + 1L;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Backoff algorithm to use for a lock that can block other threads (i.e if another thread contending with this thread
|
|
+ * can be thrown off the scheduler). This backoff should not be used for short operations such as compareAndExchange.
|
|
+ * @param counter The current counter.
|
|
+ * @return The next (if any) step in the backoff logic.
|
|
+ * @see ConcurrentUtil#backoff()
|
|
+ * @see ConcurrentUtil#exponentiallyBackoffSimple(long)
|
|
+ */
|
|
+ public static long exponentiallyBackoffComplex(final long counter) {
|
|
+ // TODO experimentally determine counters
|
|
+ if (counter < 100L) {
|
|
+ return exponentiallyBackoffSimple(counter);
|
|
+ }
|
|
+ if (counter < 1_200L) {
|
|
+ Thread.yield();
|
|
+ LockSupport.parkNanos(1_000L);
|
|
+ return counter + 1L;
|
|
+ }
|
|
+ // scale 0.1ms (100us) per failure
|
|
+ Thread.yield();
|
|
+ LockSupport.parkNanos(100_000L * counter);
|
|
+ return counter + 1;
|
|
+ }
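+
+ // Illustrative sketch (not part of the upstream patch): a typical CAS retry loop driven by the simple
+ // backoff above. The VarHandle VALUE and the field it points at are assumptions made for this example.
+ //
+ // long failures = 0L;
+ // for (;;) {
+ // final int curr = (int)VALUE.getVolatile(this);
+ // if (VALUE.compareAndSet(this, curr, curr + 1)) {
+ // break;
+ // }
+ // failures = ConcurrentUtil.exponentiallyBackoffSimple(failures);
+ // }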
|
|
+
|
|
+ /**
|
|
+ * Simple exponential backoff that will linearly increase the time per failure, according to the scale.
|
|
+ * @param counter The current failure counter.
|
|
+ * @param scale Time per failure, in ns.
|
|
+ * @param max The maximum time to wait for, in ns.
|
|
+ * @return The next counter.
|
|
+ */
|
|
+ public static long linearLongBackoff(long counter, final long scale, long max) {
|
|
+ counter = Math.min(Long.MAX_VALUE, counter + 1); // prevent overflow
|
|
+ max = Math.max(0, max);
|
|
+
|
|
+ if (scale <= 0L) {
|
|
+ return counter;
|
|
+ }
|
|
+
|
|
+ long time = scale * counter;
|
|
+
|
|
+ if (time > max || time / scale != counter) {
|
|
+ time = max;
|
|
+ }
|
|
+
|
|
+ boolean interrupted = Thread.interrupted();
|
|
+ if (time > 1_000_000L) { // 1ms
|
|
+ Thread.yield();
|
|
+ }
|
|
+ LockSupport.parkNanos(time);
|
|
+ if (interrupted) {
|
|
+ Thread.currentThread().interrupt();
|
|
+ }
|
|
+ return counter;
|
|
+ }
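+
+ // Illustrative sketch (not part of the upstream patch): spin-waiting on a flag with a linearly growing
+ // park time of 100us per failure, capped at 10ms. The volatile field "ready" is an assumption for this example.
+ //
+ // long failures = 0L;
+ // while (!this.ready) {
+ // failures = ConcurrentUtil.linearLongBackoff(failures, 100_000L, 10_000_000L);
+ // }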
|
|
+
|
|
+ /**
|
|
+ * Simple exponential backoff that will linearly increase the time per failure, according to the scale.
|
|
+ * @param counter The current failure counter.
|
|
+ * @param scale Time per failure, in ns.
|
|
+ * @param max The maximum time to wait for, in ns.
|
|
+ * @param deadline The deadline in ns. Deadline time source: {@link System#nanoTime()}.
|
|
+ * @return The next counter.
|
|
+ */
|
|
+ public static long linearLongBackoffDeadline(long counter, final long scale, long max, long deadline) {
|
|
+ counter = Math.min(Long.MAX_VALUE, counter + 1); // prevent overflow
|
|
+ max = Math.max(0, max);
|
|
+
|
|
+ if (scale <= 0L) {
|
|
+ return counter;
|
|
+ }
|
|
+
|
|
+ long time = scale * counter;
|
|
+
|
|
+ // check overflow
|
|
+ if (time / scale != counter) {
|
|
+ // overflew
|
|
+ --counter;
|
|
+ time = max;
|
|
+ } else if (time > max) {
|
|
+ time = max;
|
|
+ }
|
|
+
|
|
+ final long currTime = System.nanoTime();
|
|
+ final long diff = deadline - currTime;
|
|
+ if (diff <= 0) {
|
|
+ return counter;
|
|
+ }
|
|
+ if (diff <= 1_500_000L) { // 1.5ms
|
|
+ time = 100_000L; // 100us
|
|
+ } else if (time > 1_000_000L) { // 1ms
|
|
+ Thread.yield();
|
|
+ }
|
|
+
|
|
+ boolean interrupted = Thread.interrupted();
|
|
+ LockSupport.parkNanos(time);
|
|
+ if (interrupted) {
|
|
+ Thread.currentThread().interrupt();
|
|
+ }
|
|
+ return counter;
|
|
+ }
|
|
+
|
|
+ public static VarHandle getArrayHandle(final Class<?> type) {
|
|
+ return MethodHandles.arrayElementVarHandle(type);
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/HashUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/HashUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..2b9f36211d1cbb4fcf1457c0a83592499e9aa23b
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/HashUtil.java
|
|
@@ -0,0 +1,111 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class HashUtil {
|
|
+
|
|
+ // Copied from fastutil HashCommon
|
|
+
|
|
+ /** 2<sup>32</sup> · φ, φ = (√5 − 1)/2. */
|
|
+ private static final int INT_PHI = 0x9E3779B9;
|
|
+ /** The reciprocal of {@link #INT_PHI} modulo 2<sup>32</sup>. */
|
|
+ private static final int INV_INT_PHI = 0x144cbc89;
|
|
+ /** 2<sup>64</sup> · φ, φ = (√5 − 1)/2. */
|
|
+ private static final long LONG_PHI = 0x9E3779B97F4A7C15L;
|
|
+ /** The reciprocal of {@link #LONG_PHI} modulo 2<sup>64</sup>. */
|
|
+ private static final long INV_LONG_PHI = 0xf1de83e19937733dL;
|
|
+
|
|
+ /** Avalanches the bits of an integer by applying the finalisation step of MurmurHash3.
|
|
+ *
|
|
+ * <p>This method implements the finalisation step of Austin Appleby's <a href="http://code.google.com/p/smhasher/">MurmurHash3</a>.
|
|
+ * Its purpose is to avalanche the bits of the argument to within 0.25% bias.
|
|
+ *
|
|
+ * @param x an integer.
|
|
+ * @return a hash value with good avalanching properties.
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all integers
|
|
+ public static int murmurHash3(int x) {
|
|
+ x ^= x >>> 16;
|
|
+ x *= 0x85ebca6b;
|
|
+ x ^= x >>> 13;
|
|
+ x *= 0xc2b2ae35;
|
|
+ x ^= x >>> 16;
|
|
+ return x;
|
|
+ }
|
|
+
|
|
+
|
|
+ /** Avalanches the bits of a long integer by applying the finalisation step of MurmurHash3.
|
|
+ *
|
|
+ * <p>This method implements the finalisation step of Austin Appleby's <a href="http://code.google.com/p/smhasher/">MurmurHash3</a>.
|
|
+ * Its purpose is to avalanche the bits of the argument to within 0.25% bias.
|
|
+ *
|
|
+ * @param x a long integer.
|
|
+ * @return a hash value with good avalanching properties.
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all longs
|
|
+ public static long murmurHash3(long x) {
|
|
+ x ^= x >>> 33;
|
|
+ x *= 0xff51afd7ed558ccdL;
|
|
+ x ^= x >>> 33;
|
|
+ x *= 0xc4ceb9fe1a85ec53L;
|
|
+ x ^= x >>> 33;
|
|
+ return x;
|
|
+ }
|
|
+
|
|
+ /** Quickly mixes the bits of an integer.
|
|
+ *
|
|
+ * <p>This method mixes the bits of the argument by multiplying by the golden ratio and
|
|
+ * xorshifting the result. It is borrowed from <a href="https://github.com/leventov/Koloboke">Koloboke</a>, and
|
|
+ * it has slightly worse behaviour than {@link #murmurHash3(int)} (in open-addressing hash tables the average number of probes
|
|
+ * is slightly larger), but it's much faster.
|
|
+ *
|
|
+ * @param x an integer.
|
|
+ * @return a hash value obtained by mixing the bits of {@code x}.
|
|
+ * @see #invMix(int)
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all integers
|
|
+ public static int mix(final int x) {
|
|
+ final int h = x * INT_PHI;
|
|
+ return h ^ (h >>> 16);
|
|
+ }
|
|
+
|
|
+ /** The inverse of {@link #mix(int)}. This method is mainly useful to create unit tests.
|
|
+ *
|
|
+ * @param x an integer.
|
|
+ * @return a value that passed through {@link #mix(int)} would give {@code x}.
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all integers
|
|
+ public static int invMix(final int x) {
|
|
+ return (x ^ x >>> 16) * INV_INT_PHI;
|
|
+ }
|
|
+
|
|
+ /** Quickly mixes the bits of a long integer.
|
|
+ *
|
|
+ * <p>This method mixes the bits of the argument by multiplying by the golden ratio and
|
|
+ * xorshifting twice the result. It is borrowed from <a href="https://github.com/leventov/Koloboke">Koloboke</a>, and
|
|
+ * it has slightly worse behaviour than {@link #murmurHash3(long)} (in open-addressing hash tables the average number of probes
|
|
+ * is slightly larger), but it's much faster.
|
|
+ *
|
|
+ * @param x a long integer.
|
|
+ * @return a hash value obtained by mixing the bits of {@code x}.
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all longs
|
|
+ public static long mix(final long x) {
|
|
+ long h = x * LONG_PHI;
|
|
+ h ^= h >>> 32;
|
|
+ return h ^ (h >>> 16);
|
|
+ }
|
|
+
|
|
+ /** The inverse of {@link #mix(long)}. This method is mainly useful to create unit tests.
|
|
+ *
|
|
+ * @param x a long integer.
|
|
+ * @return a value that passed through {@link #mix(long)} would give {@code x}.
|
|
+ */
|
|
+ // additional note: this function is a bijection onto all longs
|
|
+ public static long invMix(long x) {
|
|
+ x ^= x >>> 32;
|
|
+ x ^= x >>> 16;
|
|
+ return (x ^ x >>> 32) * INV_LONG_PHI;
|
|
+ }
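+
+ // Illustrative sketch (not part of the upstream patch): mix/invMix are inverse bijections, which is what
+ // makes invMix useful for unit tests (run with -ea for the asserts):
+ //
+ // final long x = 0x0123456789ABCDEFL;
+ // assert HashUtil.invMix(HashUtil.mix(x)) == x;
+ // final int y = 0xCAFEBABE;
+ // assert HashUtil.invMix(HashUtil.mix(y)) == y;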
|
|
+
|
|
+
|
|
+ private HashUtil() {}
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/IntPairUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/IntPairUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..4e61c477a56e645228d5a2015c26816954d17bf8
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/IntPairUtil.java
|
|
@@ -0,0 +1,46 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class IntPairUtil {
|
|
+
|
|
+ /**
|
|
+ * Packs the specified integers into one long value.
|
|
+ */
|
|
+ public static long key(final int left, final int right) {
|
|
+ return ((long)right << 32) | (left & 0xFFFFFFFFL);
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Retrieves the left packed integer from the key
|
|
+ */
|
|
+ public static int left(final long key) {
|
|
+ return (int)key;
|
|
+ }
|
|
+
|
|
+ /**
|
|
+ * Retrieves the right packed integer from the key
|
|
+ */
|
|
+ public static int right(final long key) {
|
|
+ return (int)(key >>> 32);
|
|
+ }
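+
+ // Illustrative sketch (not part of the upstream patch): packing two ints (for example chunk coordinates)
+ // into a single long key and unpacking them again:
+ //
+ // final long key = IntPairUtil.key(-3, 7); // left = -3, right = 7
+ // assert IntPairUtil.left(key) == -3;
+ // assert IntPairUtil.right(key) == 7;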
|
|
+
|
|
+ public static String toString(final long key) {
|
|
+ return "{left:" + left(key) + ", right:" + right(key) + "}";
|
|
+ }
|
|
+
|
|
+ public static String toString(final long[] array, final int from, final int to) {
|
|
+ final StringBuilder ret = new StringBuilder();
|
|
+ ret.append("[");
|
|
+
|
|
+ for (int i = from; i < to; ++i) {
|
|
+ if (i != from) {
|
|
+ ret.append(", ");
|
|
+ }
|
|
+ ret.append(toString(array[i]));
|
|
+ }
|
|
+
|
|
+ ret.append("]");
|
|
+ return ret.toString();
|
|
+ }
|
|
+
|
|
+ private IntPairUtil() {}
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/IntegerUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/IntegerUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..9d7b9b8158cd01d12adbd7896ff77bee9828e101
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/IntegerUtil.java
|
|
@@ -0,0 +1,196 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class IntegerUtil {
|
|
+
|
|
+ public static final int HIGH_BIT_U32 = Integer.MIN_VALUE;
|
|
+ public static final long HIGH_BIT_U64 = Long.MIN_VALUE;
|
|
+
|
|
+ public static int ceilLog2(final int value) {
|
|
+ return Integer.SIZE - Integer.numberOfLeadingZeros(value - 1); // see doc of numberOfLeadingZeros
|
|
+ }
|
|
+
|
|
+ public static long ceilLog2(final long value) {
|
|
+ return Long.SIZE - Long.numberOfLeadingZeros(value - 1); // see doc of numberOfLeadingZeros
|
|
+ }
|
|
+
|
|
+ public static int floorLog2(final int value) {
|
|
+ // xor is optimized subtract for 2^n -1
|
|
+ // note that (2^n -1) - k = (2^n -1) ^ k for k <= (2^n - 1)
|
|
+ return (Integer.SIZE - 1) ^ Integer.numberOfLeadingZeros(value); // see doc of numberOfLeadingZeros
|
|
+ }
|
|
+
|
|
+ public static int floorLog2(final long value) {
|
|
+ // xor is optimized subtract for 2^n -1
|
|
+ // note that (2^n -1) - k = (2^n -1) ^ k for k <= (2^n - 1)
|
|
+ return (Long.SIZE - 1) ^ Long.numberOfLeadingZeros(value); // see doc of numberOfLeadingZeros
|
|
+ }
|
|
+
|
|
+ public static int roundCeilLog2(final int value) {
|
|
+ // optimized variant of 1 << (32 - leading(val - 1))
|
|
+ // given
|
|
+ // 1 << n = HIGH_BIT_32 >>> (31 - n) for n [0, 32)
|
|
+ // 1 << (32 - leading(val - 1)) = HIGH_BIT_32 >>> (31 - (32 - leading(val - 1)))
|
|
+ // HIGH_BIT_32 >>> (31 - (32 - leading(val - 1)))
|
|
+ // HIGH_BIT_32 >>> (31 - 32 + leading(val - 1))
|
|
+ // HIGH_BIT_32 >>> (-1 + leading(val - 1))
|
|
+ return HIGH_BIT_U32 >>> (Integer.numberOfLeadingZeros(value - 1) - 1);
|
|
+ }
|
|
+
|
|
+ public static long roundCeilLog2(final long value) {
|
|
+ // see logic documented above
|
|
+ return HIGH_BIT_U64 >>> (Long.numberOfLeadingZeros(value - 1) - 1);
|
|
+ }
|
|
+
|
|
+ public static int roundFloorLog2(final int value) {
|
|
+ // optimized variant of 1 << (31 - leading(val))
|
|
+ // given
|
|
+ // 1 << n = HIGH_BIT_32 >>> (31 - n) for n [0, 32)
|
|
+ // 1 << (31 - leading(val)) = HIGH_BIT_32 >> (31 - (31 - leading(val)))
|
|
+ // HIGH_BIT_32 >> (31 - (31 - leading(val)))
|
|
+ // HIGH_BIT_32 >> (31 - 31 + leading(val))
|
|
+ return HIGH_BIT_U32 >>> Integer.numberOfLeadingZeros(value);
|
|
+ }
|
|
+
|
|
+ public static long roundFloorLog2(final long value) {
|
|
+ // see logic documented above
|
|
+ return HIGH_BIT_U64 >>> Long.numberOfLeadingZeros(value);
|
|
+ }
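+
+ // Illustrative sketch (not part of the upstream patch): concrete values for the log2 helpers above.
+ //
+ // IntegerUtil.ceilLog2(5) == 3, IntegerUtil.floorLog2(5) == 2
+ // IntegerUtil.roundCeilLog2(5) == 8, IntegerUtil.roundFloorLog2(5) == 4 (rounded to powers of two, not logs)
+ // IntegerUtil.ceilLog2(8) == IntegerUtil.floorLog2(8) == 3 (exact powers of two agree)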
|
|
+
|
|
+ public static boolean isPowerOfTwo(final int n) {
|
|
+ // 2^n has one bit
|
|
+ // note: this still returns true for 0
|
|
+ return IntegerUtil.getTrailingBit(n) == n;
|
|
+ }
|
|
+
|
|
+ public static boolean isPowerOfTwo(final long n) {
|
|
+ // 2^n has one bit
|
|
+ // note: this still returns true for 0
|
|
+ return IntegerUtil.getTrailingBit(n) == n;
|
|
+ }
|
|
+
|
|
+ public static int getTrailingBit(final int n) {
|
|
+ return -n & n;
|
|
+ }
|
|
+
|
|
+ public static long getTrailingBit(final long n) {
|
|
+ return -n & n;
|
|
+ }
|
|
+
|
|
+ public static int trailingZeros(final int n) {
|
|
+ return Integer.numberOfTrailingZeros(n);
|
|
+ }
|
|
+
|
|
+ public static int trailingZeros(final long n) {
|
|
+ return Long.numberOfTrailingZeros(n);
|
|
+ }
|
|
+
|
|
+ // from hacker's delight (signed division magic value)
|
|
+ public static int getDivisorMultiple(final long numbers) {
|
|
+ return (int)(numbers >>> 32);
|
|
+ }
|
|
+
|
|
+ // from hacker's delight (signed division magic value)
|
|
+ public static int getDivisorShift(final long numbers) {
|
|
+ return (int)numbers;
|
|
+ }
|
|
+
|
|
+ // copied from hacker's delight (signed division magic value)
|
|
+ // http://www.hackersdelight.org/hdcodetxt/magic.c.txt
|
|
+ public static long getDivisorNumbers(final int d) {
|
|
+ final int ad = branchlessAbs(d);
|
|
+
|
|
+ if (ad < 2) {
|
|
+ throw new IllegalArgumentException("|number| must be in [2, 2^31 -1], not: " + d);
|
|
+ }
|
|
+
|
|
+ final int two31 = 0x80000000;
|
|
+ final long mask = 0xFFFFFFFFL; // mask for enforcing unsigned behaviour
|
|
+
|
|
+ /*
|
|
+ Signed usage:
|
|
+ int number;
|
|
+ long magic = getDivisorNumbers(div);
|
|
+ long mul = magic >>> 32;
|
|
+ int sign = number >> 31;
|
|
+ int result = (int)(((long)number * mul) >>> magic) - sign;
|
|
+ */
|
|
+ /*
|
|
+ Unsigned usage: (note: fails for input > Integer.MAX_VALUE, only use when input < Integer.MAX_VALUE to avoid sign calculation)
|
|
+ int number;
|
|
+ long magic = getDivisorNumbers(div);
|
|
+ long mul = magic >>> 32;
|
|
+ int result = (int)(((long)number * mul) >>> magic);
|
|
+ */
|
|
+
|
|
+ int p = 31;
|
|
+
|
|
+ // all these variables are UNSIGNED!
|
|
+ int t = two31 + (d >>> 31);
|
|
+ int anc = t - 1 - (int)((t & mask)%ad);
|
|
+ int q1 = (int)((two31 & mask)/(anc & mask));
|
|
+ int r1 = two31 - q1*anc;
|
|
+ int q2 = (int)((two31 & mask)/(ad & mask));
|
|
+ int r2 = two31 - q2*ad;
|
|
+ int delta;
|
|
+
|
|
+ do {
|
|
+ p = p + 1;
|
|
+ q1 = 2*q1; // Update q1 = 2**p/|nc|.
|
|
+ r1 = 2*r1; // Update r1 = rem(2**p, |nc|).
|
|
+ if ((r1 & mask) >= (anc & mask)) {// (Must be an unsigned comparison here)
|
|
+ q1 = q1 + 1;
|
|
+ r1 = r1 - anc;
|
|
+ }
|
|
+ q2 = 2*q2; // Update q2 = 2**p/|d|.
|
|
+ r2 = 2*r2; // Update r2 = rem(2**p, |d|).
|
|
+ if ((r2 & mask) >= (ad & mask)) {// (Must be an unsigned comparison here)
|
|
+ q2 = q2 + 1;
|
|
+ r2 = r2 - ad;
|
|
+ }
|
|
+ delta = ad - r2;
|
|
+ } while ((q1 & mask) < (delta & mask) || (q1 == delta && r1 == 0));
|
|
+
|
|
+ int magicNum = q2 + 1;
|
|
+ if (d < 0) {
|
|
+ magicNum = -magicNum;
|
|
+ }
|
|
+ int shift = p;
|
|
+ return ((long)magicNum << 32) | shift;
|
|
+ }
|
|
+
|
|
+ public static int branchlessAbs(final int val) {
|
|
+ // -n = -1 ^ n + 1
|
|
+ final int mask = val >> (Integer.SIZE - 1); // -1 if < 0, 0 if >= 0
|
|
+ return (mask ^ val) - mask; // if val < 0, then (0 ^ val) - 0 else (-1 ^ val) + 1
|
|
+ }
|
|
+
|
|
+ public static long branchlessAbs(final long val) {
|
|
+ // -n = -1 ^ n + 1
|
|
+ final long mask = val >> (Long.SIZE - 1); // -1 if < 0, 0 if >= 0
|
|
+ return (mask ^ val) - mask; // if val < 0, then (0 ^ val) - 0 else (-1 ^ val) + 1
|
|
+ }
|
|
+
|
|
+ // https://lemire.me/blog/2019/02/08/faster-remainders-when-the-divisor-is-a-constant-beating-compilers-and-libdivide
|
|
+ /**
|
|
+ *
|
|
+ * Usage:
|
|
+ * <pre>
|
|
+ * {@code
|
|
+ * static final long mult = getSimpleMultiplier(divisor, bits);
|
|
+ * long x = ...;
|
|
+ * long magic = x * mult;
|
|
+ * long divQ = magic >>> bits;
|
|
+ * long divR = ((magic & ((1 << bits) - 1)) * divisor) >>> bits;
|
|
+ * }
|
|
+ * </pre>
|
|
+ *
|
|
+ * @param bits The number of bits of precision for the returned result
|
|
+ */
|
|
+ public static long getUnsignedDivisorMagic(final long divisor, final int bits) {
|
|
+ return (((1L << bits) - 1L) / divisor) + 1;
|
|
+ }
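+
+ // Illustrative sketch (not part of the upstream patch): dividing by the constant 7 with 32 bits of
+ // precision, following the usage shown in the javadoc above. The concrete values are worked out below.
+ //
+ // final long mult = IntegerUtil.getUnsignedDivisorMagic(7L, 32); // 613566757
+ // final long x = 1000L;
+ // final long magic = x * mult;
+ // final long divQ = magic >>> 32; // 142 == 1000 / 7
+ // final long divR = ((magic & 0xFFFFFFFFL) * 7L) >>> 32; // 6 == 1000 % 7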
|
|
+
|
|
+ private IntegerUtil() {
|
|
+ throw new RuntimeException();
|
|
+ }
|
|
+}
|
|
\ No newline at end of file
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/Priority.java b/src/main/java/ca/spottedleaf/concurrentutil/util/Priority.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..2919bbaa07b70f182438c3be8f9ebbe0649809b6
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/Priority.java
|
|
@@ -0,0 +1,145 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public enum Priority {
|
|
+
|
|
+ /**
|
|
+ * Priority value indicating the task has completed or is being completed.
|
|
+ * This priority cannot be used to schedule tasks.
|
|
+ */
|
|
+ COMPLETING(-1),
|
|
+
|
|
+ /**
|
|
+ * Absolute highest priority, should only be used when a task is blocking a time-critical thread.
|
|
+ */
|
|
+ BLOCKING(),
|
|
+
|
|
+ /**
|
|
+ * Should only be used for urgent but not time-critical tasks.
|
|
+ */
|
|
+ HIGHEST(),
|
|
+
|
|
+ /**
|
|
+ * Two priorities above normal.
|
|
+ */
|
|
+ HIGHER(),
|
|
+
|
|
+ /**
|
|
+ * One priority above normal.
|
|
+ */
|
|
+ HIGH(),
|
|
+
|
|
+ /**
|
|
+ * Default priority.
|
|
+ */
|
|
+ NORMAL(),
|
|
+
|
|
+ /**
|
|
+ * One priority below normal.
|
|
+ */
|
|
+ LOW(),
|
|
+
|
|
+ /**
|
|
+ * Two priorities below normal.
|
|
+ */
|
|
+ LOWER(),
|
|
+
|
|
+ /**
|
|
+ * Use for tasks that should eventually execute, but are not required to.
|
|
+ */
|
|
+ LOWEST(),
|
|
+
|
|
+ /**
|
|
+ * Use for tasks that can be delayed indefinitely.
|
|
+ */
|
|
+ IDLE();
|
|
+
|
|
+ // returns whether the priority can be scheduled
|
|
+ public static boolean isValidPriority(final Priority priority) {
|
|
+ return priority != null && priority != Priority.COMPLETING;
|
|
+ }
|
|
+
|
|
+ // returns the higher priority of the two
|
|
+ public static Priority max(final Priority p1, final Priority p2) {
|
|
+ return p1.isHigherOrEqualPriority(p2) ? p1 : p2;
|
|
+ }
|
|
+
|
|
+ // returns the lower priority of the two
|
|
+ public static Priority min(final Priority p1, final Priority p2) {
|
|
+ return p1.isLowerOrEqualPriority(p2) ? p1 : p2;
|
|
+ }
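+
+ // Illustrative sketch (not part of the upstream patch): lower internal values mean higher priority, so
+ // Priority.max(Priority.HIGH, Priority.LOW) == Priority.HIGH and
+ // Priority.min(Priority.HIGH, Priority.LOW) == Priority.LOW, even though HIGH.priority < LOW.priority.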
|
|
+
|
|
+ public boolean isHigherOrEqualPriority(final Priority than) {
|
|
+ return this.priority <= than.priority;
|
|
+ }
|
|
+
|
|
+ public boolean isHigherPriority(final Priority than) {
|
|
+ return this.priority < than.priority;
|
|
+ }
|
|
+
|
|
+ public boolean isLowerOrEqualPriority(final Priority than) {
|
|
+ return this.priority >= than.priority;
|
|
+ }
|
|
+
|
|
+ public boolean isLowerPriority(final Priority than) {
|
|
+ return this.priority > than.priority;
|
|
+ }
|
|
+
|
|
+ public boolean isHigherOrEqualPriority(final int than) {
|
|
+ return this.priority <= than;
|
|
+ }
|
|
+
|
|
+ public boolean isHigherPriority(final int than) {
|
|
+ return this.priority < than;
|
|
+ }
|
|
+
|
|
+ public boolean isLowerOrEqualPriority(final int than) {
|
|
+ return this.priority >= than;
|
|
+ }
|
|
+
|
|
+ public boolean isLowerPriority(final int than) {
|
|
+ return this.priority > than;
|
|
+ }
|
|
+
|
|
+ public static boolean isHigherOrEqualPriority(final int priority, final int than) {
|
|
+ return priority <= than;
|
|
+ }
|
|
+
|
|
+ public static boolean isHigherPriority(final int priority, final int than) {
|
|
+ return priority < than;
|
|
+ }
|
|
+
|
|
+ public static boolean isLowerOrEqualPriority(final int priority, final int than) {
|
|
+ return priority >= than;
|
|
+ }
|
|
+
|
|
+ public static boolean isLowerPriority(final int priority, final int than) {
|
|
+ return priority > than;
|
|
+ }
|
|
+
|
|
+ static final Priority[] PRIORITIES = Priority.values();
|
|
+
|
|
+ /** includes special priorities */
|
|
+ public static final int TOTAL_PRIORITIES = PRIORITIES.length;
|
|
+
|
|
+ public static final int TOTAL_SCHEDULABLE_PRIORITIES = TOTAL_PRIORITIES - 1;
|
|
+
|
|
+ public static Priority getPriority(final int priority) {
|
|
+ return PRIORITIES[priority + 1];
|
|
+ }
|
|
+
|
|
+ private static int priorityCounter;
|
|
+
|
|
+ private static int nextCounter() {
|
|
+ return priorityCounter++;
|
|
+ }
|
|
+
|
|
+ public final int priority;
|
|
+
|
|
+ private Priority() {
|
|
+ this(nextCounter());
|
|
+ }
|
|
+
|
|
+ private Priority(final int priority) {
|
|
+ this.priority = priority;
|
|
+ }
|
|
+}
|
|
\ No newline at end of file
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/ThrowUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/ThrowUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..a3a8b5c6795c4d116e094e4c910553416f565b93
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/ThrowUtil.java
|
|
@@ -0,0 +1,11 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class ThrowUtil {
|
|
+
|
|
+ private ThrowUtil() {}
|
|
+
|
|
+ public static <T extends Throwable> void throwUnchecked(final Throwable thr) throws T {
|
|
+ throw (T)thr;
|
|
+ }
|
|
+
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..63688716244066581d5b505703576e3340e3baf3
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java
|
|
@@ -0,0 +1,60 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class TimeUtil {
|
|
+
|
|
+ /*
+ * The comparator is not a valid comparator for every long value. To prove where it is valid, see below.
+ *
+ * For reflexivity, we have that x - x = 0, so for any long value x, compareTimes(x, x) == 0, as expected.
+ *
+ * For symmetry, we have that x - y = -(y - x) except for when y - x = Long.MIN_VALUE.
+ * So, the difference between any times x and y must not be equal to Long.MIN_VALUE.
+ *
+ * As for the transitive relation, consider we have x,y such that x - y = a > 0 and z such that
+ * y - z = b > 0. Then, we will have that x - z > 0 is equivalent to a + b > 0. For long values,
+ * this holds as long as a + b <= Long.MAX_VALUE.
+ *
+ * Also consider we have x, y such that x - y = a < 0 and z such that y - z = b < 0. Then, we will have
+ * that x - z < 0 is equivalent to a + b < 0. For long values, this holds as long as a + b >= -Long.MAX_VALUE.
+ *
+ * Thus, the comparator is only valid for timestamps such that abs(c - d) <= Long.MAX_VALUE for all timestamps
+ * c and d.
+ */
|
|
+
|
|
+ /**
|
|
+ * This function is appropriate to be used as a {@link java.util.Comparator} between two timestamps, and
+ * indicates whether the timestamp t1 is before, equal to, or after t2.
|
|
+ */
|
|
+ public static int compareTimes(final long t1, final long t2) {
|
|
+ final long diff = t1 - t2;
|
|
+
|
|
+ // HD, Section 2-7
|
|
+ return (int) ((diff >> 63) | (-diff >>> 63));
|
|
+ }
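+
+ // Illustrative sketch (not part of the upstream patch): unlike Long.compare, this comparison stays correct
+ // across numeric wrap-around, as long as the two timestamps are within Long.MAX_VALUE of each other.
+ //
+ // final long t1 = Long.MAX_VALUE - 5L; // "now"
+ // final long t2 = t1 + 10L; // 10ns later, numerically wrapped to a negative value
+ // assert TimeUtil.compareTimes(t1, t2) < 0; // t1 is still ordered before t2
+ // assert Long.compare(t1, t2) > 0; // a plain comparison gets it wrong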
|
|
+
|
|
+ public static long getGreatestTime(final long t1, final long t2) {
|
|
+ final long diff = t1 - t2;
|
|
+ return diff < 0L ? t2 : t1;
|
|
+ }
|
|
+
|
|
+ public static long getLeastTime(final long t1, final long t2) {
|
|
+ final long diff = t1 - t2;
|
|
+ return diff > 0L ? t2 : t1;
|
|
+ }
|
|
+
|
|
+ public static long clampTime(final long value, final long min, final long max) {
|
|
+ final long diffMax = value - max;
|
|
+ final long diffMin = value - min;
|
|
+
|
|
+ if (diffMax > 0L) {
|
|
+ return max;
|
|
+ }
|
|
+ if (diffMin < 0L) {
|
|
+ return min;
|
|
+ }
|
|
+ return value;
|
|
+ }
|
|
+
|
|
+ private TimeUtil() {}
|
|
+}
|
|
diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/Validate.java b/src/main/java/ca/spottedleaf/concurrentutil/util/Validate.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..382177d0d162fa3139c9078a873ce2504a2b17b2
|
|
--- /dev/null
|
|
+++ b/src/main/java/ca/spottedleaf/concurrentutil/util/Validate.java
|
|
@@ -0,0 +1,28 @@
|
|
+package ca.spottedleaf.concurrentutil.util;
|
|
+
|
|
+public final class Validate {
|
|
+
|
|
+ public static <T> T notNull(final T obj) {
|
|
+ if (obj == null) {
|
|
+ throw new NullPointerException();
|
|
+ }
|
|
+ return obj;
|
|
+ }
|
|
+
|
|
+ public static <T> T notNull(final T obj, final String msgIfNull) {
|
|
+ if (obj == null) {
|
|
+ throw new NullPointerException(msgIfNull);
|
|
+ }
|
|
+ return obj;
|
|
+ }
|
|
+
|
|
+ public static void arrayBounds(final int off, final int len, final int arrayLength, final String msgPrefix) {
|
|
+ if (off < 0 || len < 0 || (arrayLength - off) < len) {
|
|
+ throw new ArrayIndexOutOfBoundsException(msgPrefix + ": off: " + off + ", len: " + len + ", array length: " + arrayLength);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private Validate() {
|
|
+ throw new RuntimeException();
|
|
+ }
|
|
+}
|