source: mainline/kernel/generic/include/lib/refcount.h@ 151c050

Last change on this file since 151c050 was 1871118, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 2 years ago

Make thread_t reference counted

This simplifies the interaction between various locks and thread
lifespan. For example, threads_lock can now simply be a mutex
protecting the global it was made for, and nothing more.

  • Property mode set to 100644
File size: 4.5 KB
/*
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Authors:
 *	Jiří Zárevúcky (jzr) <zarevucky.jiri@gmail.com>
 */

/*
 * Using atomics for reference counting efficiently is a little tricky,
 * so we define a unified API for this.
 */

#ifndef _LIBC_REFCOUNT_H_
#define _LIBC_REFCOUNT_H_

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Wrapped in a structure to prevent direct manipulation. */
typedef struct atomic_refcount {
	volatile atomic_int __cnt;
} atomic_refcount_t;

static inline void refcount_init(atomic_refcount_t *rc)
{
	atomic_store_explicit(&rc->__cnt, 0, memory_order_relaxed);
}
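
/*
 * Illustrative sketch (not part of this header; example_t and
 * example_create() are hypothetical names): a reference-counted object
 * typically embeds atomic_refcount_t and calls refcount_init() on creation,
 * at which point the creator holds the first reference. The counter is
 * biased, so the stored zero already represents that one reference.
 *
 *	typedef struct example {
 *		atomic_refcount_t refcount;
 *		// ... payload ...
 *	} example_t;
 *
 *	example_t *example_create(void)
 *	{
 *		example_t *obj = malloc(sizeof(example_t));  // or the kernel allocator
 *		if (obj != NULL)
 *			refcount_init(&obj->refcount);  // caller now owns one reference
 *		return obj;
 *	}
 */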

/**
 * Increment a reference count.
 *
 * Calling this without already owning a reference is undefined behavior.
 * E.g. acquiring a reference through a shared mutable pointer requires that
 * the caller first locks the pointer itself (thereby acquiring the reference
 * inherent to the shared variable), and only then may call refcount_up().
 */
static inline void refcount_up(atomic_refcount_t *rc)
{
	// NOTE: We can use relaxed operation because acquiring a reference
	// implies no ordering relationships. A reference-counted object
	// still needs to be synchronized independently of the refcount.

	int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
	    memory_order_relaxed);

	/* old < 0 indicates that the function is used incorrectly. */
	assert(old >= 0);
	(void) old;
}
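
/*
 * Illustrative sketch (hypothetical names): refcount_up() is only safe while
 * the caller already holds a reference, e.g. when handing a second reference
 * to another consumer that will release it independently.
 *
 *	// 'obj' is owned by the caller (one reference held).
 *	void example_share(example_t *obj)
 *	{
 *		refcount_up(&obj->refcount);   // take a second reference
 *		example_consumer_start(obj);   // consumer calls refcount_down() later
 *	}
 */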

/**
 * Try to upgrade a weak reference.
 * Naturally, this is contingent on another form of synchronization being used
 * to ensure that the object continues to exist while the weak reference is in
 * use.
 */
static inline bool refcount_try_up(atomic_refcount_t *rc)
{
	int cnt = atomic_load_explicit(&rc->__cnt, memory_order_relaxed);

	while (cnt >= 0) {
		if (atomic_compare_exchange_weak_explicit(&rc->__cnt, &cnt, cnt + 1,
		    memory_order_relaxed, memory_order_relaxed)) {
			return true;
		}
	}

	return false;
}
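
/*
 * Illustrative sketch (hypothetical names: registry_t, example_lookup()):
 * upgrading a weak reference held in a shared registry. The registry lock is
 * the "other form of synchronization" that keeps the object's memory valid
 * while refcount_try_up() runs; if the upgrade fails, the object has already
 * lost its last strong reference and must not be used.
 *
 *	example_t *example_lookup(registry_t *reg)
 *	{
 *		mutex_lock(&reg->lock);
 *		example_t *obj = reg->entry;
 *		if (obj != NULL && !refcount_try_up(&obj->refcount))
 *			obj = NULL;  // object is being destroyed concurrently
 *		mutex_unlock(&reg->lock);
 *		return obj;  // strong reference, or NULL
 *	}
 */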

/**
 * Check whether at most one reference remains.
 *
 * The stored counter is biased by one, so a value of zero means exactly one
 * reference exists, and -1 means the last reference has already been dropped.
 */
static inline bool refcount_unique(atomic_refcount_t *rc)
{
	int val = atomic_load_explicit(&rc->__cnt, memory_order_acquire);
	if (val < 0) {
		assert(val == -1);
	}

	return val <= 0;
}
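
/*
 * Illustrative sketch (hypothetical names): refcount_unique() can serve as a
 * sanity check before an operation that assumes exclusive ownership.
 *
 *	void example_reset(example_t *obj)
 *	{
 *		assert(refcount_unique(&obj->refcount));  // no other owners expected
 *		// ... modify obj without further synchronization ...
 *	}
 */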

/**
 * Decrement a reference count. Caller must own the reference.
 *
 * If the function returns `false`, the caller no longer owns the reference and
 * must not access the reference counted object.
 *
 * If the function returns `true`, the caller is now the sole owner of the
 * reference counted object, and must deallocate it.
 */
static inline bool refcount_down(atomic_refcount_t *rc)
{
	// NOTE: The decrementers don't need to synchronize with each other,
	// but they do need to synchronize with the one doing deallocation.
	int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
	    memory_order_release);

	assert(old >= 0);

	if (old == 0) {
		// NOTE: We are holding the last reference, so we must now
		// synchronize with all the other decrementers.

		int val = atomic_load_explicit(&rc->__cnt,
		    memory_order_acquire);
		assert(val == -1);

		/*
		 * The compiler probably wouldn't optimize the memory barrier
		 * away, but better safe than sorry.
		 */
		return val < 0;
	}

	return false;
}
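
/*
 * Illustrative sketch (hypothetical names): the usual release pattern
 * following the contract documented above. Only the caller that drops the
 * last reference deallocates the object.
 *
 *	void example_put(example_t *obj)
 *	{
 *		if (refcount_down(&obj->refcount)) {
 *			// We were the last owner; no one else can reach obj now.
 *			free(obj);  // or the matching kernel deallocator
 *		}
 *	}
 */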

#endif