VirtualBox

source: vbox/trunk/src/libs/openssl-3.3.2/crypto/threads_pthread.c@108358

Last change on this file since 108358 was 108206, checked in by vboxsync, 5 weeks ago

openssl-3.3.2: Exported all files to OSE and removed .scm-settings bugref:10757

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 27.3 KB
 
1/*
2 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10/* We need to use the OPENSSL_fork_*() deprecated APIs */
11#define OPENSSL_SUPPRESS_DEPRECATED
12
13#include <openssl/crypto.h>
14#include <crypto/cryptlib.h>
15#include "internal/cryptlib.h"
16#include "internal/rcu.h"
17#include "rcu_internal.h"
18
19#if defined(__sun)
20# include <atomic.h>
21#endif
22
23#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
24/*
25 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
26 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
27 * rather than two, i.e. it has the signature __atomic_is_lock_free(sizeof(_Atomic(T))).
28 * All of this makes it impossible to use __atomic_is_lock_free here.
29 *
30 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
31 */
32# define BROKEN_CLANG_ATOMICS
33#endif
34
35#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)
36
37# if defined(OPENSSL_SYS_UNIX)
38# include <sys/types.h>
39# include <unistd.h>
40# endif
41
42# include <assert.h>
43
44# ifdef PTHREAD_RWLOCK_INITIALIZER
45# define USE_RWLOCK
46# endif
47
48/*
49 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
50 * other compilers.
51 *
52 * Unfortunately, we can't do that with some "generic type", because there's no
53 * guarantee that the chosen generic type is large enough to cover all cases.
54 * Therefore, we implement fallbacks for each applicable type, with composed
55 * names that include the type they handle.
56 *
57 * (an anecdote: we previously tried to use |void *| as the generic type, with
58 * the thought that the pointer itself is the largest type. However, this is
59 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
60 *
61 * All applicable ATOMIC_ macros take the intended type as first parameter, so
62 * they can map to the correct fallback function. In the GNU/clang case, that
63 * parameter is simply ignored.
64 */
65
66/*
67 * Internal types used with the ATOMIC_ macros, to make it possible to compose
68 * fallback function names.
69 */
70typedef void *pvoid;
71typedef struct rcu_cb_item *prcu_cb_item;
72
73# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
74 && !defined(USE_ATOMIC_FALLBACKS)
75# if defined(__APPLE__) && defined(__clang__) && defined(__aarch64__)
76/*
77 * For pointers, Apple M1 virtualized cpu seems to have some problem using the
78 * ldapr instruction (see https://github.com/openssl/openssl/pull/23974).
79 * When using the native apple clang compiler, this instruction is emitted for
80 * atomic loads, which is bad. So, if
81 * 1) We are building on a target that defines __APPLE__ AND
82 * 2) We are building on a target using clang (__clang__) AND
83 * 3) We are building for an M1 processor (__aarch64__)
84 * Then we should not use __atomic_load_n and should instead implement our own
85 * function to issue the ldar instruction, which produces the proper
86 * sequencing guarantees
87 */
88static inline void *apple_atomic_load_n_pvoid(void **p,
89 ossl_unused int memorder)
90{
91 void *ret;
92
93 __asm volatile("ldar %0, [%1]" : "=r" (ret): "r" (p):);
94
95 return ret;
96}
97
98/* For uint64_t, we should be fine, though */
99# define apple_atomic_load_n_uint64_t(p, o) __atomic_load_n(p, o)
100
101# define ATOMIC_LOAD_N(t, p, o) apple_atomic_load_n_##t(p, o)
102# else
103# define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
104# endif
105# define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
106# define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
107# define ATOMIC_EXCHANGE_N(t, p, v, o) __atomic_exchange_n(p, v, o)
108# define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
109# define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
110# define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
111# define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
112# define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
113# else
114static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
115
116# define IMPL_fallback_atomic_load_n(t) \
117 static ossl_inline t fallback_atomic_load_n_##t(t *p) \
118 { \
119 t ret; \
120 \
121 pthread_mutex_lock(&atomic_sim_lock); \
122 ret = *p; \
123 pthread_mutex_unlock(&atomic_sim_lock); \
124 return ret; \
125 }
126IMPL_fallback_atomic_load_n(uint64_t)
127IMPL_fallback_atomic_load_n(pvoid)
128
129# define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
130
131# define IMPL_fallback_atomic_store_n(t) \
132 static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
133 { \
134 t ret; \
135 \
136 pthread_mutex_lock(&atomic_sim_lock); \
137 ret = *p; \
138 *p = v; \
139 pthread_mutex_unlock(&atomic_sim_lock); \
140 return ret; \
141 }
142IMPL_fallback_atomic_store_n(uint64_t)
143
144# define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
145
146# define IMPL_fallback_atomic_store(t) \
147 static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
148 { \
149 pthread_mutex_lock(&atomic_sim_lock); \
150 *p = *v; \
151 pthread_mutex_unlock(&atomic_sim_lock); \
152 }
153IMPL_fallback_atomic_store(uint64_t)
154IMPL_fallback_atomic_store(pvoid)
155
156# define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
157
158# define IMPL_fallback_atomic_exchange_n(t) \
159 static ossl_inline t fallback_atomic_exchange_n_##t(t *p, t v) \
160 { \
161 t ret; \
162 \
163 pthread_mutex_lock(&atomic_sim_lock); \
164 ret = *p; \
165 *p = v; \
166 pthread_mutex_unlock(&atomic_sim_lock); \
167 return ret; \
168 }
169IMPL_fallback_atomic_exchange_n(uint64_t)
170IMPL_fallback_atomic_exchange_n(prcu_cb_item)
171
172# define ATOMIC_EXCHANGE_N(t, p, v, o) fallback_atomic_exchange_n_##t(p, v)
173
174/*
175 * The fallbacks that follow don't need any per type implementation, as
176 * they are designed for uint64_t only. If there comes a time when multiple
177 * types need to be covered, it's relatively easy to refactor them the same
178 * way as the fallbacks above.
179 */
180
181static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
182{
183 uint64_t ret;
184
185 pthread_mutex_lock(&atomic_sim_lock);
186 *p += v;
187 ret = *p;
188 pthread_mutex_unlock(&atomic_sim_lock);
189 return ret;
190}
191
192# define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
193
194static ossl_inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
195{
196 uint64_t ret;
197
198 pthread_mutex_lock(&atomic_sim_lock);
199 ret = *p;
200 *p += v;
201 pthread_mutex_unlock(&atomic_sim_lock);
202 return ret;
203}
204
205# define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)
206
207static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
208{
209 uint64_t ret;
210
211 pthread_mutex_lock(&atomic_sim_lock);
212 *p -= v;
213 ret = *p;
214 pthread_mutex_unlock(&atomic_sim_lock);
215 return ret;
216}
217
218# define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
219
220static ossl_inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
221{
222 uint64_t ret;
223
224 pthread_mutex_lock(&atomic_sim_lock);
225 *p &= m;
226 ret = *p;
227 pthread_mutex_unlock(&atomic_sim_lock);
228 return ret;
229}
230
231# define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)
232
233static ossl_inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
234{
235 uint64_t ret;
236
237 pthread_mutex_lock(&atomic_sim_lock);
238 *p |= m;
239 ret = *p;
240 pthread_mutex_unlock(&atomic_sim_lock);
241 return ret;
242}
243
244# define ATOMIC_OR_FETCH(p, v, o) fallback_atomic_or_fetch(p, v)
245# endif
246
247/*
248 * users is broken up into 2 parts
249 * bits 0-15  - current reader count
250 * bits 32-63 - ID
251 */
252# define READER_SHIFT 0
253# define ID_SHIFT 32
254# define READER_SIZE 16
255# define ID_SIZE 32
256
257# define READER_MASK (((uint64_t)1 << READER_SIZE) - 1)
258# define ID_MASK (((uint64_t)1 << ID_SIZE) - 1)
259# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
260# define ID_VAL(x) (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
261# define VAL_READER ((uint64_t)1 << READER_SHIFT)
262# define VAL_ID(x) ((uint64_t)x << ID_SHIFT)
263
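/*
 * Illustrative sketch, not part of the original file: how a 64-bit "users"
 * word decomposes under the macros above.  rcu_users_example() is an
 * invented name used only for this illustration; the values are arbitrary.
 */
static ossl_unused void rcu_users_example(void)
{
    /* a qp tagged with generation id 7, currently held by 3 readers */
    uint64_t users = VAL_ID(7) | (3 * VAL_READER);

    assert(READER_COUNT(users) == 3);   /* low 16 bits: active reader count */
    assert(ID_VAL(users) == 7);         /* bits 32-63: generation id */
}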
264/*
265 * This is the core of an rcu lock. It tracks the readers and writers for the
266 * current quiescence point for a given lock. Users is the 64 bit value that
267 * stores the READERS/ID as defined above
268 *
269 */
270struct rcu_qp {
271 uint64_t users;
272};
273
274struct thread_qp {
275 struct rcu_qp *qp;
276 unsigned int depth;
277 CRYPTO_RCU_LOCK *lock;
278};
279
280# define MAX_QPS 10
281/*
282 * This is the per thread tracking data
283 * that is assigned to each thread participating
284 * in an rcu qp
285 *
286 * qp points to the qp that it last acquired
287 *
288 */
289struct rcu_thr_data {
290 struct thread_qp thread_qps[MAX_QPS];
291};
292
293/*
294 * This is the internal version of a CRYPTO_RCU_LOCK
295 * it is cast from CRYPTO_RCU_LOCK
296 */
297struct rcu_lock_st {
298 /* Callbacks to call for next ossl_synchronize_rcu */
299 struct rcu_cb_item *cb_items;
300
301 /* The context we are being created against */
302 OSSL_LIB_CTX *ctx;
303
304 /* rcu generation counter for in-order retirement */
305 uint32_t id_ctr;
306
307 /* Array of quiescent points for synchronization */
308 struct rcu_qp *qp_group;
309
310 /* Number of elements in qp_group array */
311 size_t group_count;
312
313 /* Index of the current qp in the qp_group array */
314 uint64_t reader_idx;
315
316 /* value of the next id_ctr value to be retired */
317 uint32_t next_to_retire;
318
319 /* index of the next free rcu_qp in the qp_group */
320 uint64_t current_alloc_idx;
321
322 /* number of qp's in qp_group array currently being retired */
323 uint32_t writers_alloced;
324
325 /* lock protecting write side operations */
326 pthread_mutex_t write_lock;
327
328 /* lock protecting updates to writers_alloced/current_alloc_idx */
329 pthread_mutex_t alloc_lock;
330
331 /* signal to wake threads waiting on alloc_lock */
332 pthread_cond_t alloc_signal;
333
334 /* lock to enforce in-order retirement */
335 pthread_mutex_t prior_lock;
336
337 /* signal to wake threads waiting on prior_lock */
338 pthread_cond_t prior_signal;
339};
340
341/* Read side acquisition of the current qp */
342static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
343{
344 uint64_t qp_idx;
345
346 /* get the current qp index */
347 for (;;) {
348 /*
349 * Notes on use of __ATOMIC_ACQUIRE
350 * We need to ensure the following:
351 * 1) That subsequent operations aren't optimized by hoisting them above
352 * this operation. Specifically, we don't want the below re-load of
353 * qp_idx to get optimized away
354 * 2) We want to ensure that any updating of reader_idx on the write side
355 * of the lock is flushed from a local cpu cache so that we see any
356 * updates prior to the load. This is a non-issue on cache coherent
357 * systems like x86, but is relevant on other arches
358 * Note: This applies to the reload below as well
359 */
360 qp_idx = ATOMIC_LOAD_N(uint64_t, &lock->reader_idx, __ATOMIC_ACQUIRE);
361
362 /*
363 * Notes on use of __ATOMIC_RELEASE
364 * This counter is only read by the write side of the lock, and so we
365 * specify __ATOMIC_RELEASE here to ensure that the write side of the
366 * lock sees this during the spin loop read of users, as it waits for the
367 * reader count to approach zero
368 */
369 ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
370 __ATOMIC_RELEASE);
371
372 /* if the idx hasn't changed, we're good, else try again */
373 if (qp_idx == ATOMIC_LOAD_N(uint64_t, &lock->reader_idx, __ATOMIC_ACQUIRE))
374 break;
375
376 /*
377 * Notes on use of __ATOMIC_RELEASE
378 * As with the add above, we want to ensure that this decrement is
379 * seen by the write side of the lock as soon as it happens to prevent
380 * undue spinning waiting for write side completion
381 */
382 ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
383 __ATOMIC_RELEASE);
384 }
385
386 return &lock->qp_group[qp_idx];
387}
388
389static void ossl_rcu_free_local_data(void *arg)
390{
391 OSSL_LIB_CTX *ctx = arg;
392 CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
393 struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
394
395 OPENSSL_free(data);
396 CRYPTO_THREAD_set_local(lkey, NULL);
397}
398
399void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
400{
401 struct rcu_thr_data *data;
402 int i, available_qp = -1;
403 CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
404
405 /*
406 * we're going to access current_qp here so ask the
407 * processor to fetch it
408 */
409 data = CRYPTO_THREAD_get_local(lkey);
410
411 if (data == NULL) {
412 data = OPENSSL_zalloc(sizeof(*data));
413 OPENSSL_assert(data != NULL);
414 CRYPTO_THREAD_set_local(lkey, data);
415 ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
416 }
417
418 for (i = 0; i < MAX_QPS; i++) {
419 if (data->thread_qps[i].qp == NULL && available_qp == -1)
420 available_qp = i;
421 /* If we have a hold on this lock already, we're good */
422 if (data->thread_qps[i].lock == lock) {
423 data->thread_qps[i].depth++;
424 return;
425 }
426 }
427
428 /*
429 * if we get here, then we don't have a hold on this lock yet
430 */
431 assert(available_qp != -1);
432
433 data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
434 data->thread_qps[available_qp].depth = 1;
435 data->thread_qps[available_qp].lock = lock;
436}
437
438void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
439{
440 int i;
441 CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
442 struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
443 uint64_t ret;
444
445 assert(data != NULL);
446
447 for (i = 0; i < MAX_QPS; i++) {
448 if (data->thread_qps[i].lock == lock) {
449 /*
450 * As with read side acquisition, we use __ATOMIC_RELEASE here
451 * to ensure that the decrement is published immediately
452 * to any write side waiters
453 */
454 data->thread_qps[i].depth--;
455 if (data->thread_qps[i].depth == 0) {
456 ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
457 __ATOMIC_RELEASE);
458 OPENSSL_assert(ret != UINT64_MAX);
459 data->thread_qps[i].qp = NULL;
460 data->thread_qps[i].lock = NULL;
461 }
462 return;
463 }
464 }
465 /*
466 * If we get here, we're trying to unlock a lock that we never acquired -
467 * that's fatal.
468 */
469 assert(0);
470}
471
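/*
 * Illustrative read-side usage sketch, not part of the original file.  It
 * assumes a CRYPTO_RCU_LOCK created elsewhere with ossl_rcu_lock_new() and a
 * writer-maintained shared pointer; "rcu_reader_example" is an invented name.
 * Prototypes for the ossl_rcu_* calls come from "internal/rcu.h" above.
 */
static ossl_unused int rcu_reader_example(CRYPTO_RCU_LOCK *lock,
                                          void **shared_ptr)
{
    int value = 0;
    int *cur;

    ossl_rcu_read_lock(lock);                /* pin the current qp */
    cur = ossl_rcu_uptr_deref(shared_ptr);   /* acquire-load of the pointer */
    if (cur != NULL)
        value = *cur;                        /* safe to dereference until unlock */
    ossl_rcu_read_unlock(lock);              /* cur must not be used after this */

    return value;
}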
472/*
473 * Write side allocation routine to get the current qp
474 * and replace it with a new one
475 */
476static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
477{
478 uint64_t new_id;
479 uint64_t current_idx;
480
481 pthread_mutex_lock(&lock->alloc_lock);
482
483 /*
484 * we need at least one qp to be available with one
485 * left over, so that readers can start working on
486 * one that isn't yet being waited on
487 */
488 while (lock->group_count - lock->writers_alloced < 2)
489 /* we have to wait for one to be free */
490 pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);
491
492 current_idx = lock->current_alloc_idx;
493
494 /* Allocate the qp */
495 lock->writers_alloced++;
496
497 /* increment the allocation index */
498 lock->current_alloc_idx =
499 (lock->current_alloc_idx + 1) % lock->group_count;
500
501 /* get and insert a new id */
502 new_id = lock->id_ctr;
503 lock->id_ctr++;
504
505 new_id = VAL_ID(new_id);
506 /*
507 * Even though we are under a write side lock here,
508 * we need to use atomic instructions to ensure that the results
509 * of this update are published to the read side prior to updating the
510 * reader idx below
511 */
512 ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
513 __ATOMIC_RELEASE);
514 ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
515 __ATOMIC_RELEASE);
516
517 /*
518 * Update the reader index to be the prior qp.
519 * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
520 * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
521 * of this value to be seen on the read side immediately after it happens
522 */
523 ATOMIC_STORE_N(uint64_t, &lock->reader_idx, lock->current_alloc_idx,
524 __ATOMIC_RELEASE);
525
526 /* wake up any waiters */
527 pthread_cond_signal(&lock->alloc_signal);
528 pthread_mutex_unlock(&lock->alloc_lock);
529 return &lock->qp_group[current_idx];
530}
531
532static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
533{
534 pthread_mutex_lock(&lock->alloc_lock);
535 lock->writers_alloced--;
536 pthread_cond_signal(&lock->alloc_signal);
537 pthread_mutex_unlock(&lock->alloc_lock);
538}
539
540static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
541 int count)
542{
543 struct rcu_qp *new =
544 OPENSSL_zalloc(sizeof(*new) * count);
545
546 lock->group_count = count;
547 return new;
548}
549
550void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
551{
552 pthread_mutex_lock(&lock->write_lock);
553}
554
555void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
556{
557 pthread_mutex_unlock(&lock->write_lock);
558}
559
560void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
561{
562 struct rcu_qp *qp;
563 uint64_t count;
564 struct rcu_cb_item *cb_items, *tmpcb;
565
566 /*
567 * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
568 * writes before we read, and publish our write immediately
569 */
570 cb_items = ATOMIC_EXCHANGE_N(prcu_cb_item, &lock->cb_items, NULL,
571 __ATOMIC_ACQ_REL);
572
573 qp = update_qp(lock);
574
575 /*
576 * wait for the reader count to reach zero
577 * Note the use of __ATOMIC_ACQUIRE here to ensure that any
578 * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
579 * is visible prior to our read
580 */
581 do {
582 count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
583 } while (READER_COUNT(count) != 0);
584
585 /* retire in order */
586 pthread_mutex_lock(&lock->prior_lock);
587 while (lock->next_to_retire != ID_VAL(count))
588 pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
589 lock->next_to_retire++;
590 pthread_cond_broadcast(&lock->prior_signal);
591 pthread_mutex_unlock(&lock->prior_lock);
592
593 retire_qp(lock, qp);
594
595 /* handle any callbacks that we have */
596 while (cb_items != NULL) {
597 tmpcb = cb_items;
598 cb_items = cb_items->next;
599 tmpcb->fn(tmpcb->data);
600 OPENSSL_free(tmpcb);
601 }
602}
603
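/*
 * Illustrative write-side usage sketch, not part of the original file:
 * publish a replacement object, then wait for all pre-existing readers
 * before reclaiming the old one.  "rcu_writer_example" and the int payload
 * are invented for this illustration.
 */
static ossl_unused int rcu_writer_example(CRYPTO_RCU_LOCK *lock,
                                          void **shared_ptr, int new_value)
{
    int *newp, *oldp;

    newp = OPENSSL_zalloc(sizeof(*newp));
    if (newp == NULL)
        return 0;
    *newp = new_value;

    ossl_rcu_write_lock(lock);                        /* serialize writers */
    oldp = ossl_rcu_uptr_deref(shared_ptr);           /* value being replaced */
    ossl_rcu_assign_uptr(shared_ptr, (void **)&newp); /* release-publish newp */
    ossl_rcu_write_unlock(lock);

    ossl_synchronize_rcu(lock);   /* readers that saw oldp have now unlocked */
    OPENSSL_free(oldp);
    return 1;
}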
604int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
605{
606 struct rcu_cb_item *new =
607 OPENSSL_zalloc(sizeof(*new));
608
609 if (new == NULL)
610 return 0;
611
612 new->data = data;
613 new->fn = cb;
614 /*
615 * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
616 * list are visible to us prior to reading, and publish the new value
617 * immediately
618 */
619 new->next = ATOMIC_EXCHANGE_N(prcu_cb_item, &lock->cb_items, new,
620 __ATOMIC_ACQ_REL);
621
622 return 1;
623}
624
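/*
 * Illustrative sketch, not part of the original file: deferred reclamation
 * via ossl_rcu_call() instead of blocking in ossl_synchronize_rcu().  The
 * queued callback runs from the next ossl_synchronize_rcu() on this lock.
 * "rcu_free_old_cb" and "rcu_defer_free_example" are invented names.
 */
static void rcu_free_old_cb(void *data)
{
    OPENSSL_free(data);    /* no reader can still reference data here */
}

static ossl_unused void rcu_defer_free_example(CRYPTO_RCU_LOCK *lock, void *old)
{
    if (!ossl_rcu_call(lock, rcu_free_old_cb, old)) {
        /* allocation failed; fall back to synchronous reclamation */
        ossl_synchronize_rcu(lock);
        OPENSSL_free(old);
    }
}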
625void *ossl_rcu_uptr_deref(void **p)
626{
627 return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
628}
629
630void ossl_rcu_assign_uptr(void **p, void **v)
631{
632 ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
633}
634
635CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
636{
637 struct rcu_lock_st *new;
638
639 if (num_writers < 1)
640 num_writers = 1;
641
642 ctx = ossl_lib_ctx_get_concrete(ctx);
643 if (ctx == NULL)
644 return 0;
645
646 new = OPENSSL_zalloc(sizeof(*new));
647 if (new == NULL)
648 return NULL;
649
650 new->ctx = ctx;
651 pthread_mutex_init(&new->write_lock, NULL);
652 pthread_mutex_init(&new->prior_lock, NULL);
653 pthread_mutex_init(&new->alloc_lock, NULL);
654 pthread_cond_init(&new->prior_signal, NULL);
655 pthread_cond_init(&new->alloc_signal, NULL);
656 new->qp_group = allocate_new_qp_group(new, num_writers + 1);
657 if (new->qp_group == NULL) {
658 OPENSSL_free(new);
659 new = NULL;
660 }
661 return new;
662}
663
664void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
665{
666 struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;
667
668 if (lock == NULL)
669 return;
670
671 /* make sure we're synchronized */
672 ossl_synchronize_rcu(rlock);
673
674 OPENSSL_free(rlock->qp_group);
675 /* There should only be a single qp left now */
676 OPENSSL_free(rlock);
677}
678
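/*
 * Illustrative lifecycle sketch, not part of the original file: size the lock
 * for the expected number of concurrent writers and release it when done.
 * Note that ossl_rcu_lock_free() itself calls ossl_synchronize_rcu().
 * "rcu_lifecycle_example" is an invented name.
 */
static ossl_unused void rcu_lifecycle_example(OSSL_LIB_CTX *ctx)
{
    CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(1, ctx);

    if (lock == NULL)
        return;
    /* ... readers and writers use the lock here ... */
    ossl_rcu_lock_free(lock);
}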
679CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
680{
681# ifdef USE_RWLOCK
682 CRYPTO_RWLOCK *lock;
683
684 if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
685 /* Don't set error, to avoid recursion blowup. */
686 return NULL;
687
688 if (pthread_rwlock_init(lock, NULL) != 0) {
689 OPENSSL_free(lock);
690 return NULL;
691 }
692# else
693 pthread_mutexattr_t attr;
694 CRYPTO_RWLOCK *lock;
695
696 if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
697 /* Don't set error, to avoid recursion blowup. */
698 return NULL;
699
700 /*
701 * We don't use recursive mutexes, but try to catch errors if we do.
702 */
703 pthread_mutexattr_init(&attr);
704# if !defined (__TANDEM) && !defined (_SPT_MODEL_)
705# if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
706 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
707# endif
708# else
709 /* The SPT Thread Library does not define MUTEX attributes. */
710# endif
711
712 if (pthread_mutex_init(lock, &attr) != 0) {
713 pthread_mutexattr_destroy(&attr);
714 OPENSSL_free(lock);
715 return NULL;
716 }
717
718 pthread_mutexattr_destroy(&attr);
719# endif
720
721 return lock;
722}
723
724__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
725{
726# ifdef USE_RWLOCK
727 if (pthread_rwlock_rdlock(lock) != 0)
728 return 0;
729# else
730 if (pthread_mutex_lock(lock) != 0) {
731 assert(errno != EDEADLK && errno != EBUSY);
732 return 0;
733 }
734# endif
735
736 return 1;
737}
738
739__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
740{
741# ifdef USE_RWLOCK
742 if (pthread_rwlock_wrlock(lock) != 0)
743 return 0;
744# else
745 if (pthread_mutex_lock(lock) != 0) {
746 assert(errno != EDEADLK && errno != EBUSY);
747 return 0;
748 }
749# endif
750
751 return 1;
752}
753
754int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
755{
756# ifdef USE_RWLOCK
757 if (pthread_rwlock_unlock(lock) != 0)
758 return 0;
759# else
760 if (pthread_mutex_unlock(lock) != 0) {
761 assert(errno != EPERM);
762 return 0;
763 }
764# endif
765
766 return 1;
767}
768
769void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
770{
771 if (lock == NULL)
772 return;
773
774# ifdef USE_RWLOCK
775 pthread_rwlock_destroy(lock);
776# else
777 pthread_mutex_destroy(lock);
778# endif
779 OPENSSL_free(lock);
780
781 return;
782}
783
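/*
 * Illustrative sketch, not part of the original file, of the public
 * CRYPTO_THREAD_* rwlock API implemented above (declared in
 * <openssl/crypto.h>).  "counter_bump" and "counter_read" are invented names.
 */
static ossl_unused int counter_bump(CRYPTO_RWLOCK *lock, int *counter)
{
    if (!CRYPTO_THREAD_write_lock(lock))    /* exclusive access */
        return 0;
    (*counter)++;
    return CRYPTO_THREAD_unlock(lock);      /* 1 on success, 0 on error */
}

static ossl_unused int counter_read(CRYPTO_RWLOCK *lock, const int *counter,
                                    int *out)
{
    if (!CRYPTO_THREAD_read_lock(lock))     /* shared access */
        return 0;
    *out = *counter;
    return CRYPTO_THREAD_unlock(lock);
}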
784int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
785{
786 if (pthread_once(once, init) != 0)
787 return 0;
788
789 return 1;
790}
791
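/*
 * Illustrative sketch, not part of the original file: the usual one-time
 * initialization pattern built on CRYPTO_THREAD_run_once() with a statically
 * initialized CRYPTO_ONCE.  All names here are invented.
 */
static CRYPTO_ONCE example_once = CRYPTO_ONCE_STATIC_INIT;
static CRYPTO_RWLOCK *example_global_lock;

static void example_once_init(void)
{
    example_global_lock = CRYPTO_THREAD_lock_new();
}

static ossl_unused CRYPTO_RWLOCK *example_get_global_lock(void)
{
    if (!CRYPTO_THREAD_run_once(&example_once, example_once_init))
        return NULL;
    return example_global_lock;     /* NULL if the init itself failed */
}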
792int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
793{
794 if (pthread_key_create(key, cleanup) != 0)
795 return 0;
796
797 return 1;
798}
799
800void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
801{
802 return pthread_getspecific(*key);
803}
804
805int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
806{
807 if (pthread_setspecific(*key, val) != 0)
808 return 0;
809
810 return 1;
811}
812
813int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
814{
815 if (pthread_key_delete(*key) != 0)
816 return 0;
817
818 return 1;
819}
820
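/*
 * Illustrative sketch, not part of the original file: per-thread storage via
 * the wrappers above.  It assumes the key was created once with
 * CRYPTO_THREAD_init_local(key, tl_buffer_cleanup); all names are invented.
 */
static ossl_unused void tl_buffer_cleanup(void *ptr)
{
    OPENSSL_free(ptr);      /* run for each thread's value on thread exit */
}

static ossl_unused void *tl_get_buffer(CRYPTO_THREAD_LOCAL *key)
{
    void *buf = CRYPTO_THREAD_get_local(key);  /* NULL on first use in a thread */

    if (buf == NULL) {
        buf = OPENSSL_zalloc(64);
        if (buf == NULL || !CRYPTO_THREAD_set_local(key, buf)) {
            OPENSSL_free(buf);
            return NULL;
        }
    }
    return buf;
}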
821CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
822{
823 return pthread_self();
824}
825
826int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
827{
828 return pthread_equal(a, b);
829}
830
831int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
832{
833# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
834 if (__atomic_is_lock_free(sizeof(*val), val)) {
835 *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
836 return 1;
837 }
838# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
839 /* This will work for all future Solaris versions. */
840 if (ret != NULL) {
841 *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
842 return 1;
843 }
844# endif
845 if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
846 return 0;
847
848 *val += amount;
849 *ret = *val;
850
851 if (!CRYPTO_THREAD_unlock(lock))
852 return 0;
853
854 return 1;
855}
856
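/*
 * Illustrative sketch, not part of the original file: a reference-count bump
 * through CRYPTO_atomic_add().  The lock is only used on platforms without
 * suitable lock-free atomics, but callers must still supply one for that
 * fallback path.  "refcount_up" is an invented name.
 */
static ossl_unused int refcount_up(int *refcount, CRYPTO_RWLOCK *lock, int *out)
{
    /* on success, *out holds the value after the increment */
    return CRYPTO_atomic_add(refcount, 1, out, lock);
}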
857int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
858 CRYPTO_RWLOCK *lock)
859{
860# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
861 if (__atomic_is_lock_free(sizeof(*val), val)) {
862 *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
863 return 1;
864 }
865# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
866 /* This will work for all future Solaris versions. */
867 if (ret != NULL) {
868 *ret = atomic_or_64_nv(val, op);
869 return 1;
870 }
871# endif
872 if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
873 return 0;
874 *val |= op;
875 *ret = *val;
876
877 if (!CRYPTO_THREAD_unlock(lock))
878 return 0;
879
880 return 1;
881}
882
883int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
884{
885# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
886 if (__atomic_is_lock_free(sizeof(*val), val)) {
887 __atomic_load(val, ret, __ATOMIC_ACQUIRE);
888 return 1;
889 }
890# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
891 /* This will work for all future Solaris versions. */
892 if (ret != NULL) {
893 *ret = atomic_or_64_nv(val, 0);
894 return 1;
895 }
896# endif
897 if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
898 return 0;
899 *ret = *val;
900 if (!CRYPTO_THREAD_unlock(lock))
901 return 0;
902
903 return 1;
904}
905
906int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
907{
908# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
909 if (__atomic_is_lock_free(sizeof(*val), val)) {
910 __atomic_load(val, ret, __ATOMIC_ACQUIRE);
911 return 1;
912 }
913# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
914 /* This will work for all future Solaris versions. */
915 if (ret != NULL) {
916 *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
917 return 1;
918 }
919# endif
920 if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
921 return 0;
922 *ret = *val;
923 if (!CRYPTO_THREAD_unlock(lock))
924 return 0;
925
926 return 1;
927}
928
929# ifndef FIPS_MODULE
930int openssl_init_fork_handlers(void)
931{
932 return 1;
933}
934# endif /* FIPS_MODULE */
935
936int openssl_get_fork_id(void)
937{
938 return getpid();
939}
940#endif