/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# ifndef RT_WITHOUT_PRAGMA_ONCE /* VBOX */
#  pragma once
# endif /* VBOX */

# include <openssl/e_os2.h>
# include <openssl/trace.h>
# include <openssl/err.h>

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef struct {
    _Atomic int val;
} CRYPTO_REF_COUNT;

static inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_fetch_add_explicit(&refcnt->val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to the shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself can
 * be relaxed.  If it hits zero, the object is about to be destroyed, and
 * since this is the last use of the object, the destructor may assume that
 * access to its mutable members no longer has to be serialized, which would
 * otherwise imply an acquire fence.  Hence the conditional acquire fence
 * below.
 */
static inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

static inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_load_explicit(&refcnt->val, memory_order_relaxed);
    return 1;
}
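
/*
 * Illustrative sketch, not part of this header: a typical "free" path built
 * on CRYPTO_DOWN_REF().  Once the count hits zero, the conditional acquire
 * fence above guarantees that writes made by other threads before their
 * final decrement are visible here, so the destructor may touch mutable
 * members without further locking.  FOO, foo_free() and foo->data are
 * hypothetical names used only for the example.
 *
 *     void foo_free(FOO *foo)
 *     {
 *         int i;
 *
 *         if (foo == NULL)
 *             return;
 *         CRYPTO_DOWN_REF(&foo->references, &i);
 *         if (i > 0)
 *             return;
 *         CRYPTO_FREE_REF(&foo->references);
 *         OPENSSL_free(foo->data);
 *         OPENSSL_free(foo);
 *     }
 */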

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef struct {
    int val;
} CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_add(&refcnt->val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_sub(&refcnt->val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

static __inline__ int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_load_n(&refcnt->val, __ATOMIC_RELAXED);
    return 1;
}

#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedOr((void *)&refcnt->val, 0);
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedOr_nf((void *)&refcnt->val, 0);
    return 1;
}

#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
/* under Windows CE we still have old-style Interlocked* functions */
extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 0);
    return 1;
}

#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef struct {
    int val;
#  ifdef OPENSSL_THREADS
    CRYPTO_RWLOCK *lock;
#  endif
} CRYPTO_REF_COUNT;

#  ifdef OPENSSL_THREADS

static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, 1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, -1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    return CRYPTO_atomic_load_int(&refcnt->val, ret, refcnt->lock);
}

#  define CRYPTO_NEW_FREE_DEFINED 1
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    refcnt->lock = CRYPTO_THREAD_lock_new();
    if (refcnt->lock == NULL) {
        ERR_raise(ERR_LIB_CRYPTO, ERR_R_CRYPTO_LIB);
        return 0;
    }
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
    if (refcnt != NULL)
        CRYPTO_THREAD_lock_free(refcnt->lock);
}

#  else /* OPENSSL_THREADS */

static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    refcnt->val++;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    refcnt->val--;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    *ret = refcnt->val;
    return 1;
}

#  endif /* OPENSSL_THREADS */
# endif

# ifndef CRYPTO_NEW_FREE_DEFINED
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
}
# endif /* CRYPTO_NEW_FREE_DEFINED */
#undef CRYPTO_NEW_FREE_DEFINED
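
/*
 * Illustrative sketch, not part of this header: objects embedding a
 * CRYPTO_REF_COUNT are expected to initialise it with CRYPTO_NEW_REF() in
 * their constructor and release it with CRYPTO_FREE_REF() in their
 * destructor, whichever variant of those helpers was defined above.  FOO
 * and foo_new() are hypothetical names used only for the example.
 *
 *     FOO *foo_new(void)
 *     {
 *         FOO *foo = OPENSSL_zalloc(sizeof(*foo));
 *
 *         if (foo == NULL)
 *             return NULL;
 *         if (!CRYPTO_NEW_REF(&foo->references, 1)) {
 *             OPENSSL_free(foo);
 *             return NULL;
 *         }
 *         return foo;
 *     }
 */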

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references.val, (void *)object)
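
/*
 * Illustrative sketch, not part of this header: how the helpers above are
 * typically combined in an up-ref routine.  REF_PRINT_COUNT() assumes the
 * object exposes a "references" member of type CRYPTO_REF_COUNT; BAR and
 * bar_up_ref() are hypothetical names used only for the example.
 *
 *     int bar_up_ref(BAR *bar)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&bar->references, &i) <= 0)
 *             return 0;
 *         REF_PRINT_COUNT("BAR", bar);
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1;
 *     }
 */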

#endif