ConcurrentHashMap

  • Extends AbstractMap and implements the ConcurrentMap and Serializable interfaces; ConcurrentMap declares a number of atomic operations (a usage sketch follows the snippet below)
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
    private static final long serialVersionUID = 7249069246763182397L;
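
For reference, a minimal usage sketch of the atomic operations that ConcurrentMap declares (the class name, map name and values below are purely illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class ConcurrentMapDemo {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> scores = new ConcurrentHashMap<>();
        scores.putIfAbsent("alice", 0);                            // insert only if the key is absent
        scores.replace("alice", 0, 10);                            // replace only if the current value is 0
        scores.compute("alice", (k, v) -> v == null ? 1 : v + 1);  // atomic per-key read-modify-write
        System.out.println(scores);                                // {alice=11}
    }
}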
  • Fields and constants
   // Fields and constants
   // Maximum table capacity
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    // Default initial capacity
    private static final int DEFAULT_CAPACITY = 16;
    
    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
    
    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     * (No longer used in JDK 1.8; kept only for backward compatibility.)
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
    
    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     * (In JDK 1.8 this default is likewise barely used directly; kept for compatibility with earlier versions.)
     */
    private static final float LOAD_FACTOR = 0.75f;
    
    // Threshold for converting a bin's linked list to a red-black tree: a list with at least this many nodes will attempt treeification
    static final int TREEIFY_THRESHOLD = 8;
    
    // Threshold for converting a red-black tree back to a linked list: a bin with this many nodes or fewer is converted back
    static final int UNTREEIFY_THRESHOLD = 6;
    
    // When a list reaches the treeify threshold (8) but the table capacity is below this value, the table is resized instead of treeifying
    static final int MIN_TREEIFY_CAPACITY = 64;
    
    // Minimum number of bins a resizing thread claims per transfer step (the stride)
    private static final int MIN_TRANSFER_STRIDE = 16;
    
    // One of the values used to compute the resize stamp (see the sketch after these constants)
    private static int RESIZE_STAMP_BITS = 16;
    
    // Maximum number of threads that can participate in a resize
    private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
    
    // One of the values used to compute the resize stamp
    private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
    
    // Special hash values for the different node types/states
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
    
    // Number of available CPU cores
    static final int NCPU = Runtime.getRuntime().availableProcessors();
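
For context, the resize-stamp constants above are combined by a small helper; in JDK 8 it looks roughly like the sketch below (shown here only to make the constants concrete):

    static final int resizeStamp(int n) {
        // n is the current table length; the marker bit (1 << 15) ensures that the stamp,
        // once shifted left by RESIZE_STAMP_SHIFT and stored in sizeCtl, is negative
        return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
    }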
  • The four node types
    // The structure of a plain linked-list node
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final K key;
        volatile V val;
        volatile Node<K,V> next;

        Node(int hash, K key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()       { return key; }
        public final V getValue()     { return val; }
        public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
        public final String toString(){ return key + "=" + val; }
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }

        /**
         * Virtualized support for map.get(); overridden in subclasses.
         */
        // When a bin's head node has a negative hash it is not a plain list node; the head's (overridden) find method is then used to locate the target node
        Node<K,V> find(int h, Object k) {
            Node<K,V> e = this;
            if (k != null) {
                do {
                    K ek;
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                } while ((e = e.next) != null);
            }
            return null;
        }
    }
    
    // Red-black tree node
    static final class TreeNode<K, V> extends Node<K, V> {
        boolean red;

        TreeNode<K, V> parent;
        TreeNode<K, V> left;
        TreeNode<K, V> right;

        /**
         * The prev pointer exists to make deletion easier:
         * removing a non-head node from the linked list requires its predecessor,
         * so a prev pointer is kept directly.
         */
        TreeNode<K, V> prev;

        TreeNode(int hash, K key, V val, Node<K, V> next,
                 TreeNode<K, V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K, V> find(int h, Object k) {
            return findTreeNode(h, k, null);
        }

        /**
         * Searches for the given key, starting the traversal from this node as the root.
         */
        final TreeNode<K, V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K, V> p = this;
                do {
                    int ph, dir;
                    K pk;
                    TreeNode<K, V> q;
                    TreeNode<K, V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                        return p;
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                            (kc = comparableClassFor(k)) != null) &&
                            (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q;
                    else
                        p = pl;
                } while (p != null);
            }
            return null;
        }
    }
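
The tree lookup above relies on two helpers that are not shown in this listing, comparableClassFor and compareComparables. A simplified sketch of their behavior (not the exact JDK code): comparableClassFor(k) returns k's class only if k implements Comparable of its own class, and compareComparables falls back to 0 when the other key is null or of a different class, which then forces a tieBreakOrder decision.

    // Simplified sketch of the two helpers (assumption: behavior-equivalent, not the JDK source)
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c = x.getClass();
            if (c == String.class)
                return c;                                   // common fast path
            for (java.lang.reflect.Type t : c.getGenericInterfaces()) {
                if (t instanceof java.lang.reflect.ParameterizedType) {
                    java.lang.reflect.ParameterizedType p = (java.lang.reflect.ParameterizedType) t;
                    java.lang.reflect.Type[] args = p.getActualTypeArguments();
                    if (p.getRawType() == Comparable.class && args.length == 1 && args[0] == c)
                        return c;                           // c implements Comparable<c>
                }
            }
        }
        return null;
    }

    @SuppressWarnings({"rawtypes", "unchecked"})
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc) ? 0 : ((Comparable) k).compareTo(x);
    }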
    
/**
 * TreeBin is a proxy/holder for TreeNodes (it wraps the tree and provides the
 * list/tree conversion operations and the lock control for the bin).
 * It extends Node and its hash is fixed at TREEBIN (-2).
 */
static final class TreeBin<K, V> extends Node<K, V> {
    TreeNode<K, V> root;                // root of the red-black tree
    volatile TreeNode<K, V> first;      // head of the linked-list view of the bin
    volatile Thread waiter;             // the most recent thread to set the WAITER bit

    volatile int lockState;             // overall lock state of this bin

    static final int WRITER = 1;        // binary 001: write lock held on the tree
    static final int WAITER = 2;        // binary 010: a thread is waiting to acquire the write lock
    static final int READER = 4;        // binary 100: read lock; reads may run concurrently, and each extra reader adds READER to lockState

    /**
     * Used to decide ordering when hash codes are equal and the keys are not mutually Comparable.
     */
    static int tieBreakOrder(Object a, Object b) {
        int d;
        if (a == null || b == null ||
            (d = a.getClass().getName().
                compareTo(b.getClass().getName())) == 0)
            d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                -1 : 1);
        return d;
    }

    /**
     * Builds a red-black tree from the linked list headed by b.
     * This is where, after the plain Nodes have been converted to TreeNodes, passing the
     * head node to the TreeBin constructor rebuilds the list into a red-black tree.
     */
    TreeBin(TreeNode<K, V> b) {
        super(TREEBIN, null, null, null);
        this.first = b;
        TreeNode<K, V> r = null;
        for (TreeNode<K, V> x = b, next; x != null; x = next) {
            next = (TreeNode<K, V>) x.next;
            x.left = x.right = null;
            if (r == null) {
                x.parent = null;
                x.red = false;
                r = x;
            } else {
                K k = x.key;
                int h = x.hash;
                Class<?> kc = null;
                for (TreeNode<K, V> p = r; ; ) {
                    int dir, ph;
                    K pk = p.key;
                    if ((ph = p.hash) > h)
                        dir = -1;
                    else if (ph < h)
                        dir = 1;
                    else if ((kc == null &&
                        (kc = comparableClassFor(k)) == null) ||
                        (dir = compareComparables(kc, k, pk)) == 0)
                        dir = tieBreakOrder(k, pk);
                    TreeNode<K, V> xp = p;
                    if ((p = (dir <= 0) ? p.left : p.right) == null) {
                        x.parent = xp;
                        if (dir <= 0)
                            xp.left = x;
                        else
                            xp.right = x;
                        r = balanceInsertion(r, x);
                        break;
                    }
                }
            }
        }
        this.root = r;
        assert checkInvariants(root);
    }
    
    /**
     * Acquires the write lock on the tree root.
     */
    private final void lockRoot() {
        if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
            contendedLock();
    }

    /**
     * Releases the write lock.
     */
    private final void unlockRoot() {
        lockState = 0;
    }

    /**
     * Possibly blocks awaiting root lock.
     */
    private final void contendedLock() {
        boolean waiting = false;
        for (int s; ; ) {
            if (((s = lockState) & ~WAITER) == 0) {
                if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
                    if (waiting)
                        waiter = null;
                    return;
                }
            } else if ((s & WAITER) == 0) {
                if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
                    waiting = true;
                    waiter = Thread.currentThread();
                }
            } else if (waiting)
                LockSupport.park(this);
        }
    }

    /**
     * Searches from the root for a node "equal" to the given key; returns it if found, null otherwise.
     * While a write lock is held or requested, the search falls back to the linked list.
     */
    final Node<K, V> find(int h, Object k) {
        if (k != null) {
            for (Node<K, V> e = first; e != null; ) {
                int s;
                K ek;
                /**
                 * Two special cases use a plain linked-list traversal:
                 * 1. a thread currently holds the write lock -- reading the list avoids blocking the reader;
                 * 2. a thread is waiting for the write lock -- no new read locks are taken, effectively giving writers priority.
                 */
                if (((s = lockState) & (WAITER | WRITER)) != 0) {
                    if (e.hash == h &&
                        ((ek = e.key) == k || (ek != null && k.equals(ek))))
                        return e;
                    e = e.next;
                // otherwise take a read lock on the tree
                } else if (U.compareAndSwapInt(this, LOCKSTATE, s,
                    s + READER)) {
                    TreeNode<K, V> r, p;
                    try {
                        p = ((r = root) == null ? null :
                            r.findTreeNode(h, k, null));
                    } finally {
                        Thread w;
                        if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                            (READER | WAITER) && (w = waiter) != null)
                            LockSupport.unpark(w);
                    }
                    return p;
                }
            }
        }
        return null;
    }

    /**
     * Finds the node for the given key, inserting a new one if it is absent.
     *
     * @return null if a new node was inserted, otherwise the existing node
     */
    final TreeNode<K, V> putTreeVal(int h, K k, V v) {
        Class<?> kc = null;
        boolean searched = false;
        for (TreeNode<K, V> p = root; ; ) {
            int dir, ph;
            K pk;
            if (p == null) {
                first = root = new TreeNode<K, V>(h, k, v, null, null);
                break;
            } else if ((ph = p.hash) > h)
                dir = -1;
            else if (ph < h)
                dir = 1;
            else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                return p;
            else if ((kc == null &&
                (kc = comparableClassFor(k)) == null) ||
                (dir = compareComparables(kc, k, pk)) == 0) {
                if (!searched) {
                    TreeNode<K, V> q, ch;
                    searched = true;
                    if (((ch = p.left) != null &&
                        (q = ch.findTreeNode(h, k, kc)) != null) ||
                        ((ch = p.right) != null &&
                            (q = ch.findTreeNode(h, k, kc)) != null))
                        return q;
                }
                dir = tieBreakOrder(k, pk);
            }

            TreeNode<K, V> xp = p;
            if ((p = (dir <= 0) ? p.left : p.right) == null) {
                TreeNode<K, V> x, f = first;
                first = x = new TreeNode<K, V>(h, k, v, f, xp);
                if (f != null)
                    f.prev = x;
                if (dir <= 0)
                    xp.left = x;
                else
                    xp.right = x;
                if (!xp.red)
                    x.red = true;
                else {
                    lockRoot();
                    try {
                        root = balanceInsertion(root, x);
                    } finally {
                        unlockRoot();
                    }
                }
                break;
            }
        }
        assert checkInvariants(root);
        return null;
    }

    /**
     * Removes a node from the red-black tree:
     * 1. if the tree has become too small, returns true so the caller converts it back to a list;
     * 2. if the tree is still large enough, no conversion is needed, but the removal requires the write lock.
     */
    final boolean removeTreeNode(TreeNode<K, V> p) {
        TreeNode<K, V> next = (TreeNode<K, V>) p.next;
        TreeNode<K, V> pred = p.prev;  // unlink traversal pointers
        TreeNode<K, V> r, rl;
        if (pred == null)
            first = next;
        else
            pred.next = next;
        if (next != null)
            next.prev = pred;
        if (first == null) {
            root = null;
            return true;
        }
        if ((r = root) == null || r.right == null || // too small
            (rl = r.left) == null || rl.left == null)
            return true;
        lockRoot();
        try {
            TreeNode<K, V> replacement;
            TreeNode<K, V> pl = p.left;
            TreeNode<K, V> pr = p.right;
            if (pl != null && pr != null) {
                TreeNode<K, V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red;
                s.red = p.red;
                p.red = c; // swap colors
                TreeNode<K, V> sr = s.right;
                TreeNode<K, V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                } else {
                    TreeNode<K, V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    r = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            } else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K, V> pp = replacement.parent = p.parent;
                if (pp == null)
                    r = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            root = (p.red) ? r : balanceDeletion(r, replacement);

            if (p == replacement) {  // detach pointers
                TreeNode<K, V> pp;
                if ((pp = p.parent) != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                    p.parent = null;
                }
            }
        } finally {
            unlockRoot();
        }
        assert checkInvariants(root);
        return false;
    }
    // Many other tree-related methods are omitted here
    // ....
}

/**
 * A ForwardingNode is a transient node that only appears while a resize is in progress;
 * its hash is fixed at MOVED (-1) and it stores no actual data.
 * Once all the nodes of a bin in the old table have been migrated to the new table,
 * a ForwardingNode is placed in that bin.
 * A read that hits a ForwardingNode is forwarded to the new, resized table;
 * a write that hits one tries to help with the resize.
 */
static final class ForwardingNode<K, V> extends Node<K, V> {
    final Node<K, V>[] nextTable;

    ForwardingNode(Node<K, V>[] tab) {
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }

    // Looks up the key in the new array (nextTable)
    Node<K, V> find(int h, Object k) {
        // loop to avoid arbitrarily deep recursion on forwarding nodes
        // the ForwardingNode's find searches the new table, following further forwards iteratively
        outer:
        // start from the new table
        for (Node<K, V>[] tab = nextTable; ; ) {
            Node<K, V> e;
            int n;
            // if the slot is null, or the table is null or empty, return null
            if (k == null || tab == null || (n = tab.length) == 0 ||
                (e = tabAt(tab, (n - 1) & h)) == null)
                return null;
            // traverse the list or tree at that slot
            for (; ; ) {
                int eh;
                K ek;
                // if this node is the one being looked for, return it
                if ((eh = e.hash) == h &&
                    ((ek = e.key) == k || (ek != null && k.equals(ek))))
                    return e;
                // the node's hash is negative, so it is a special node
                if (eh < 0) {
                    // if it is another ForwardingNode, another migration has happened; continue searching on the newer table
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K, V>) e).nextTable;
                        continue outer;
                    } else
                        // otherwise (e.g. a TreeBin) delegate to that node's find method
                        return e.find(h, k);
                }
                // reached the end of the list without finding the key: return null
                if ((e = e.next) == null)
                    return null;
            }
        }
    }
}

/**
 * Reservation node.
 * Its hash is fixed at RESERVED (-3) and it stores no actual data.
 * It is used only as a placeholder to lock on in the functional APIs
 * computeIfAbsent and compute (a usage example follows below).
 */
static final class ReservationNode<K, V> extends Node<K, V> {
    ReservationNode() {
        super(RESERVED, null, null, null);
    }

    Node<K, V> find(int h, Object k) {
        return null;
    }
}
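
As a usage-level illustration of where ReservationNode matters: computeIfAbsent runs the mapping function at most once per key even under contention, and the reservation node is the placeholder that gets locked in the empty bin while the function runs. A minimal example (names are illustrative):

    ConcurrentHashMap<String, java.util.List<String>> cache = new ConcurrentHashMap<>();
    cache.computeIfAbsent("users", k -> new java.util.ArrayList<>()).add("alice");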
  • Key methods
    // Computes the hash, similar to HashMap's hash method (spreads the high bits and masks off the sign bit)
    static final int spread(int h) {
        return (h ^ (h >>> 16)) & HASH_BITS;
    }
    
    // Returns the smallest power of two greater than or equal to the given value
    private static final int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }
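    
    // A couple of hand-computed values for the two helpers above, as a sanity check:
    // tableSizeFor(10) == 16,  tableSizeFor(16) == 16,  tableSizeFor(17) == 32
    // spread(h) XORs the high 16 bits into the low bits and then masks off the sign bit
    // with HASH_BITS, so a normal node's hash is always >= 0 and can never be confused
    // with MOVED (-1), TREEBIN (-2) or RESERVED (-3)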
    
    // Volatile read of the element at index i of the table; ABASE is the offset of the array's first element
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
    // CAS the element at index i of the table from c to v
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
    // Volatile write of v into index i of the table
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
    
    // table is the hash table itself, declared volatile (visibility and ordering)
    transient volatile Node<K,V>[] table;
    
    // With no contention, the element count is accumulated in baseCount; under contention, threads accumulate into separate counter cells (similar to LongAdder)
    private transient volatile long baseCount;
    
    /*
     * Initialization and resizing control.
     * When negative, the table is being initialized or resized: -1 means initializing,
     * otherwise the value is -(1 + the number of active resizing threads).
     * When the table is null, it holds the initial capacity to use on creation, or 0 for the default.
     * After initialization, it holds the element-count threshold that triggers the next resize.
     */
    private transient volatile int sizeCtl;
    
    // Used during resizing: the next table index (plus one) to split while resizing
    private transient volatile int transferIndex;
    
    // Spinlock (held via CAS) used when creating or resizing the counter cells
    private transient volatile int cellsBusy;
    
    // Counter cells: under contention, each thread adds its element-count delta to a cell chosen by its hash/probe value
    private transient volatile CounterCell[] counterCells;
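    
    // For reference, the CounterCell type used above is essentially a padded holder for a
    // single volatile long; in JDK 8 it looks roughly like the following sketch (the
    // @sun.misc.Contended annotation avoids false sharing between adjacent cells):
    @sun.misc.Contended static final class CounterCell {
        volatile long value;                   // this cell's share of the element count
        CounterCell(long x) { value = x; }
    }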
    
    // No-argument constructor
    public ConcurrentHashMap() {
    }
    
    // Constructor taking an initial capacity (the table actually created is the smallest power of two >= 1.5 * initialCapacity + 1)
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }
    
    // Constructor taking an existing map
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }
    
    // Two-argument constructor; it delegates to the three-argument one
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }
    
    // Three-argument constructor
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }
    
    // As the constructors above show, sizeCtl ends up holding the initial capacity of the table to be created
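    
    // A quick worked example of the single-argument constructor: the requested capacity is
    // first grown by half plus one before being rounded up to a power of two, so the table
    // can be larger than a plain power-of-two rounding would suggest:
    //   new ConcurrentHashMap<>(16):  16 + (16 >>> 1) + 1 = 25, tableSizeFor(25) = 32 -> sizeCtl = 32
    //   new ConcurrentHashMap<>(10):  10 + (10 >>> 1) + 1 = 16, tableSizeFor(16) = 16 -> sizeCtl = 16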
    
    // size() returns the current number of elements in the map; it delegates to sumCount()
    public int size() {
        long n = sumCount();
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }
    
    // Sums the current element count (baseCount plus all the counter cells)
    final long sumCount() {
        CounterCell[] as = counterCells; CounterCell a;
        long sum = baseCount;
        if (as != null) {
            for (int i = 0; i < as.length; ++i) {
                if ((a = as[i]) != null)
                    sum += a.value;
            }
        }
        return sum;
    }
    
    // Checks whether the map is empty
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }
    
    // The get method
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        // compute the hash from the key's hashCode
        int h = spread(key.hashCode());
        // the table is non-null and non-empty, and the bin indexed by the hash is non-null
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            // if the head node is the one being looked for, return its value
            if ((eh = e.hash) == h) {
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            // a negative hash means the head is not a plain list node, so delegate to that node's find method
            else if (eh < 0)
                return (p = e.find(h, key)) != null ? p.val : null;
            // otherwise the bin is a plain linked list: traverse it and return the value if the key is found
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        // not found
        return null;
    }
    
    // Checks whether a key is present; since null values are not allowed, get(key) != null is sufficient
    public boolean containsKey(Object key) {
        return get(key) != null;
    }
    
    // put delegates to putVal
    public V put(K key, V value) {
        return putVal(key, value, false);
    }
    
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        // a null key or value immediately throws NullPointerException
        if (key == null || value == null) throw new NullPointerException();
        // compute the hash from the key
        int hash = spread(key.hashCode());
        // counts the nodes traversed in the bin (list or tree)
        int binCount = 0;
        // spin: if a CAS fails, retry until the put succeeds
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            // if the table is null or empty, initialize it
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            // if the bin indexed by the hash is empty, wrap the key/value in a Node and CAS it into place
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            // a hash of MOVED (-1) means a resize is in progress, so help with the transfer
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            // otherwise lock the bin's head node
            else {
                V oldVal = null;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        // a hash >= 0 means this bin is a linked list
                        if (fh >= 0) {
                            binCount = 1;
                            // traverse the list; if the key already exists, overwrite the old value (unless onlyIfAbsent)
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                // key not found in the list: append a new node at the tail
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        // if the head is a TreeBin, use the tree's put method
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            // putTreeVal returns null if the key was absent (the new node has already been inserted); otherwise it returns the existing node, whose value is then replaced with the new one
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                
                if (binCount != 0) {
                    // if binCount reached TREEIFY_THRESHOLD (8), try to convert the list into a red-black tree
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        // add 1 to the element count
        addCount(1L, binCount);
        return null;
    }
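    
    // A small usage sketch of how the onlyIfAbsent flag surfaces in the public API
    // (putIfAbsent delegates to putVal with onlyIfAbsent = true; return values in comments):
    //   ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
    //   map.put("a", 1);            // returns null: no previous mapping
    //   map.putIfAbsent("a", 2);    // returns 1: key already present, value unchanged
    //   map.put("a", 3);            // returns 1: old value, mapping is now a=3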
    
    // Tries to convert a bin's linked list into a red-black tree
    private final void treeifyBin(Node<K,V>[] tab, int index) {
        Node<K,V> b; int n, sc;
        if (tab != null) {
            // if the table length is below MIN_TREEIFY_CAPACITY (64), resize instead of treeifying
            if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
                tryPresize(n << 1);
            // otherwise, if the bin is non-null and its head has a non-negative hash (a plain list)
            else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
                // lock the head of the list being converted
                synchronized (b) {
                    if (tabAt(tab, index) == b) {
                        TreeNode<K,V> hd = null, tl = null;
                        // walk the list, converting each Node into a TreeNode
                        for (Node<K,V> e = b; e != null; e = e.next) {
                            TreeNode<K,V> p =
                                new TreeNode<K,V>(e.hash, e.key, e.val,
                                                  null, null);
                            if ((p.prev = tl) == null)
                                hd = p;
                            else
                                tl.next = p;
                            tl = p;
                        }
                        // wrap the head of the TreeNode list in a TreeBin (which builds the tree and implements the tree operations) and publish it into the table
                        setTabAt(tab, index, new TreeBin<K,V>(hd));
                    }
                }
            }
        }
    }
  • Resizing and node-transfer methods
    // to be added in a later update

A few questions

1. What are the differences between ConcurrentHashMap in JDK 1.7 and JDK 1.8?

In 1.7 the map is backed by a Segment array, where each Segment extends ReentrantLock and holds a HashEntry[] (each segment can be viewed as a small HashMap). A put does not lock the whole map; it only locks the one Segment the key falls into, i.e. segment (striped) locking. The finer lock granularity allows more concurrency and better performance.

In 1.8 the segment-locking approach was dropped in favor of synchronized plus CAS to guarantee thread safety, and the underlying structure became an array of bins holding linked lists and red-black trees, as shown in the source code above.
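
To make the locking-granularity point concrete: because a 1.8 update only CASes an empty bin or synchronizes on that single bin's head node, threads updating different keys rarely contend. A typical pattern that exploits this (illustrative):

ConcurrentHashMap<String, Long> counts = new ConcurrentHashMap<>();
counts.merge("requests", 1L, Long::sum);   // atomic per-key increment, no map-wide lock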