source: mainline/kernel/generic/include/lib/refcount.h

Last change on this file was 1a1e124, checked in by Jiří Zárevúcky <zarevucky.jiri@…>, 17 months ago

Add static refcount initializer

/*
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Authors:
 *	Jiří Zárevúcky (jzr) <zarevucky.jiri@gmail.com>
 */

/*
 * Using atomics for reference counting efficiently is a little tricky,
 * so we define a unified API for this.
 */
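
/*
 * Convention used throughout: the stored count is biased by one. A value of
 * 0 means a single live reference, and -1 means the last reference has been
 * dropped. This is what allows refcount_down() to detect the final release
 * with a single fetch-sub.
 */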

#ifndef _LIBC_REFCOUNT_H_
#define _LIBC_REFCOUNT_H_

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Wrapped in a structure to prevent direct manipulation. */
typedef struct atomic_refcount {
	volatile atomic_int __cnt;
} atomic_refcount_t;

#define REFCOUNT_INITIALIZER() { \
	.__cnt = ATOMIC_VAR_INIT(0), \
}

static inline void refcount_init(atomic_refcount_t *rc)
{
	atomic_init(&rc->__cnt, 0);
}
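
/*
 * Example (hypothetical my_obj_t): a refcount is typically embedded in the
 * reference-counted object and initialized either statically or at
 * allocation time. A freshly initialized count represents exactly one
 * reference, owned by the creator.
 *
 *     typedef struct my_obj {
 *         atomic_refcount_t refcount;
 *         int payload;
 *     } my_obj_t;
 *
 *     static my_obj_t static_obj = {
 *         .refcount = REFCOUNT_INITIALIZER(),
 *     };
 *
 *     my_obj_t *my_obj_create(void)
 *     {
 *         my_obj_t *obj = malloc(sizeof(*obj));
 *         if (obj != NULL)
 *             refcount_init(&obj->refcount);
 *         return obj;
 *     }
 */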

/**
 * Increment a reference count.
 *
 * Calling this without already owning a reference is undefined behavior.
 * E.g. acquiring a reference through a shared mutable pointer requires that
 * the caller first lock the pointer itself (thereby becoming a temporary
 * owner of the reference inherent to the shared variable), and only then
 * call refcount_up().
 */
static inline void refcount_up(atomic_refcount_t *rc)
{
	// NOTE: We can use a relaxed operation because acquiring a reference
	// implies no ordering relationships. A reference-counted object
	// still needs to be synchronized independently of the refcount.

	int old = atomic_fetch_add_explicit(&rc->__cnt, 1,
	    memory_order_relaxed);

	/* old < 0 indicates that the function is used incorrectly. */
	assert(old >= 0);
	(void) old;
}
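
/*
 * Example (hypothetical shared_obj/shared_obj_lock): taking a new reference
 * from a shared pointer. Holding the lock makes the caller a temporary owner
 * of the reference inherent to the shared variable, which is what makes the
 * refcount_up() call legal.
 *
 *     my_obj_t *shared_obj_get(void)
 *     {
 *         mutex_lock(&shared_obj_lock);
 *         my_obj_t *obj = shared_obj;
 *         if (obj != NULL)
 *             refcount_up(&obj->refcount);
 *         mutex_unlock(&shared_obj_lock);
 *         return obj;
 *     }
 */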

/**
 * Try to upgrade a weak reference.
 *
 * Naturally, this is contingent on another form of synchronization being
 * used to ensure that the object continues to exist while the weak reference
 * is in use.
 */
static inline bool refcount_try_up(atomic_refcount_t *rc)
{
	int cnt = atomic_load_explicit(&rc->__cnt, memory_order_relaxed);

	while (cnt >= 0) {
		if (atomic_compare_exchange_weak_explicit(&rc->__cnt, &cnt,
		    cnt + 1, memory_order_relaxed, memory_order_relaxed)) {
			return true;
		}
	}

	return false;
}
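
/*
 * Example (hypothetical cache_find()/mutex helpers): upgrading a weak
 * reference found in a lookup structure. The structure's lock keeps the
 * object's memory valid for the duration of the attempt; refcount_try_up()
 * fails only if the last strong reference has already been dropped.
 *
 *     my_obj_t *cache_get(int key)
 *     {
 *         mutex_lock(&cache_lock);
 *         my_obj_t *obj = cache_find(key);  // weak pointer
 *         if (obj != NULL && !refcount_try_up(&obj->refcount))
 *             obj = NULL;  // object is concurrently being destroyed
 *         mutex_unlock(&cache_lock);
 *         return obj;
 *     }
 */

/**
 * Check whether the caller holds the only remaining reference.
 *
 * A return value of true means either that the counter shows no reference
 * other than the caller's, or that the last reference has already been
 * dropped. The acquire load synchronizes with the releases performed in
 * refcount_down() by other threads.
 */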
static inline bool refcount_unique(atomic_refcount_t *rc)
{
	int val = atomic_load_explicit(&rc->__cnt, memory_order_acquire);
	if (val < 0) {
		assert(val == -1);
	}

	return val <= 0;
}

/**
 * Decrement a reference count. Caller must own the reference.
 *
 * If the function returns `false`, the caller no longer owns the reference
 * and must not access the reference-counted object.
 *
 * If the function returns `true`, the caller is now the sole owner of the
 * reference-counted object, and must deallocate it.
 */
static inline bool refcount_down(atomic_refcount_t *rc)
{
	// NOTE: The decrementers don't need to synchronize with each other,
	// but they do need to synchronize with the one doing deallocation.
	int old = atomic_fetch_sub_explicit(&rc->__cnt, 1,
	    memory_order_release);

	assert(old >= 0);

	if (old == 0) {
		// NOTE: We are holding the last reference, so we must now
		// synchronize with all the other decrementers.

		int val = atomic_load_explicit(&rc->__cnt,
		    memory_order_acquire);
		assert(val == -1);

		/*
		 * The compiler probably wouldn't optimize the memory barrier
		 * away, but better safe than sorry.
		 */
		return val < 0;
	}

	return false;
}
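
/*
 * Example (hypothetical my_obj_put()): the canonical release pattern. The
 * one caller that sees refcount_down() return true is the sole remaining
 * owner and is responsible for deallocation.
 *
 *     void my_obj_put(my_obj_t *obj)
 *     {
 *         if (refcount_down(&obj->refcount))
 *             free(obj);
 *     }
 */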

#endif