1 | /*
|
---|
2 | * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
|
---|
3 | *
|
---|
4 | * Licensed under the Apache License 2.0 (the "License"). You may not use
|
---|
5 | * this file except in compliance with the License. You can obtain a copy
|
---|
6 | * in the file LICENSE in the source distribution or at
|
---|
7 | * https://www.openssl.org/source/license.html
|
---|
8 | */
|
---|
9 |
|
---|
10 | #if defined(_WIN32)
|
---|
11 | # include <windows.h>
|
---|
12 | # if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
|
---|
13 | # define USE_RWLOCK
|
---|
14 | # endif
|
---|
15 | #endif
|
---|
16 | #include <assert.h>
|
---|
17 |
|
---|
18 | /*
|
---|
19 | * VC++ 2008 or earlier x86 compilers do not have an inline implementation
|
---|
20 | * of InterlockedOr64 for 32bit and will fail to run on Windows XP 32bit.
|
---|
21 | * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
|
---|
22 | * To work around this problem, we implement a manual locking mechanism for
|
---|
23 | * only VC++ 2008 or earlier x86 compilers.
|
---|
24 | */
|
---|
25 |
|
---|
26 | #if (defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600)
|
---|
27 | # define NO_INTERLOCKEDOR64
|
---|
28 | #endif
|
---|
29 |
|
---|
30 | #include <openssl/crypto.h>
|
---|
31 | #include <crypto/cryptlib.h>
|
---|
32 | #include "internal/common.h"
|
---|
33 | #include "internal/thread_arch.h"
|
---|
34 | #include "internal/rcu.h"
|
---|
35 | #include "rcu_internal.h"
|
---|
36 |
|
---|
37 | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)
|
---|
38 |
|
---|
39 | # ifdef USE_RWLOCK
|
---|
/*
 * Windows SRWLOCK plus a flag recording whether the current holder took
 * the lock exclusively, so CRYPTO_THREAD_unlock() knows whether to call
 * ReleaseSRWLockExclusive or ReleaseSRWLockShared.
 */
typedef struct {
    SRWLOCK lock;
    int exclusive;          /* nonzero while held for writing */
} CRYPTO_win_rwlock;
44 | # endif
|
---|
45 |
|
---|
46 | # define READER_SHIFT 0
|
---|
47 | # define ID_SHIFT 32
|
---|
48 | # define READER_SIZE 32
|
---|
49 | # define ID_SIZE 32
|
---|
50 |
|
---|
51 | # define READER_MASK (((LONG64)1 << READER_SIZE)-1)
|
---|
52 | # define ID_MASK (((LONG64)1 << ID_SIZE)-1)
|
---|
53 | # define READER_COUNT(x) (((LONG64)(x) >> READER_SHIFT) & READER_MASK)
|
---|
54 | # define ID_VAL(x) (((LONG64)(x) >> ID_SHIFT) & ID_MASK)
|
---|
55 | # define VAL_READER ((LONG64)1 << READER_SHIFT)
|
---|
56 | # define VAL_ID(x) ((LONG64)x << ID_SHIFT)
|
---|
57 |
|
---|
/*
 * This defines a quescent point (qp)
 * This is the barrier beyond which a writer
 * must wait before freeing data that was
 * atomically updated
 */
struct rcu_qp {
    /*
     * Packed 64-bit state, manipulated with the Interlocked*64 API:
     * low 32 bits hold the active reader count (READER_MASK), high 32
     * bits hold the writer generation id (ID_MASK/VAL_ID).
     */
    volatile LONG64 users;
};
67 |
|
---|
/* One read-side hold: which qp a thread pinned, for which lock, and how
 * many nested ossl_rcu_read_lock() calls are outstanding on it. */
struct thread_qp {
    struct rcu_qp *qp;      /* qp this thread has a hold on, or NULL */
    unsigned int depth;     /* read-lock nesting depth */
    CRYPTO_RCU_LOCK *lock;  /* lock the hold belongs to */
};
73 |
|
---|
/* Maximum number of distinct rcu locks a single thread can hold at once */
#define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 *
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};
86 |
|
---|
/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to run at the next ossl_synchronize_rcu() (atomic list) */
    struct rcu_cb_item *cb_items;
    /* Library context this lock was created against */
    OSSL_LIB_CTX *ctx;
    /* Monotonic writer generation counter, stamped into each qp */
    uint32_t id_ctr;
    /* Array of quiescent points */
    struct rcu_qp *qp_group;
    /* Number of elements in qp_group */
    size_t group_count;
    /* Next writer generation expected to retire (enforces ordering) */
    uint32_t next_to_retire;
    /* Index of the qp new readers should pin (atomically read/written) */
    volatile long int reader_idx;
    /* Index of the next qp to hand out in update_qp() */
    uint32_t current_alloc_idx;
    /* Number of qps currently allocated to in-flight writers */
    uint32_t writers_alloced;
    /* Serializes ossl_rcu_write_lock()/unlock() */
    CRYPTO_MUTEX *write_lock;
    /* Protect qp allocation state (current_alloc_idx/writers_alloced) */
    CRYPTO_MUTEX *alloc_lock;
    CRYPTO_CONDVAR *alloc_signal;
    /* Protect next_to_retire for in-order retirement */
    CRYPTO_MUTEX *prior_lock;
    CRYPTO_CONDVAR *prior_signal;
};
107 |
|
---|
108 | static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
|
---|
109 | int count)
|
---|
110 | {
|
---|
111 | struct rcu_qp *new =
|
---|
112 | OPENSSL_zalloc(sizeof(*new) * count);
|
---|
113 |
|
---|
114 | lock->group_count = count;
|
---|
115 | return new;
|
---|
116 | }
|
---|
117 |
|
---|
118 | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
|
---|
119 | {
|
---|
120 | struct rcu_lock_st *new;
|
---|
121 |
|
---|
122 | if (num_writers < 1)
|
---|
123 | num_writers = 1;
|
---|
124 |
|
---|
125 | ctx = ossl_lib_ctx_get_concrete(ctx);
|
---|
126 | if (ctx == NULL)
|
---|
127 | return 0;
|
---|
128 |
|
---|
129 | new = OPENSSL_zalloc(sizeof(*new));
|
---|
130 |
|
---|
131 | if (new == NULL)
|
---|
132 | return NULL;
|
---|
133 |
|
---|
134 | new->ctx = ctx;
|
---|
135 | new->write_lock = ossl_crypto_mutex_new();
|
---|
136 | new->alloc_signal = ossl_crypto_condvar_new();
|
---|
137 | new->prior_signal = ossl_crypto_condvar_new();
|
---|
138 | new->alloc_lock = ossl_crypto_mutex_new();
|
---|
139 | new->prior_lock = ossl_crypto_mutex_new();
|
---|
140 | new->qp_group = allocate_new_qp_group(new, num_writers + 1);
|
---|
141 | if (new->qp_group == NULL
|
---|
142 | || new->alloc_signal == NULL
|
---|
143 | || new->prior_signal == NULL
|
---|
144 | || new->write_lock == NULL
|
---|
145 | || new->alloc_lock == NULL
|
---|
146 | || new->prior_lock == NULL) {
|
---|
147 | OPENSSL_free(new->qp_group);
|
---|
148 | ossl_crypto_condvar_free(&new->alloc_signal);
|
---|
149 | ossl_crypto_condvar_free(&new->prior_signal);
|
---|
150 | ossl_crypto_mutex_free(&new->alloc_lock);
|
---|
151 | ossl_crypto_mutex_free(&new->prior_lock);
|
---|
152 | ossl_crypto_mutex_free(&new->write_lock);
|
---|
153 | OPENSSL_free(new);
|
---|
154 | new = NULL;
|
---|
155 | }
|
---|
156 | return new;
|
---|
157 |
|
---|
158 | }
|
---|
159 |
|
---|
160 | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
|
---|
161 | {
|
---|
162 | OPENSSL_free(lock->qp_group);
|
---|
163 | ossl_crypto_condvar_free(&lock->alloc_signal);
|
---|
164 | ossl_crypto_condvar_free(&lock->prior_signal);
|
---|
165 | ossl_crypto_mutex_free(&lock->alloc_lock);
|
---|
166 | ossl_crypto_mutex_free(&lock->prior_lock);
|
---|
167 | ossl_crypto_mutex_free(&lock->write_lock);
|
---|
168 | OPENSSL_free(lock);
|
---|
169 | }
|
---|
170 |
|
---|
/*
 * Pin the current qp for a reader, lock-free.  We optimistically bump
 * the reader count on whichever qp reader_idx names, then re-read
 * reader_idx: if a writer swapped qps between the two reads we drop our
 * count and retry, so we never keep a hold on a qp a writer may already
 * be waiting out.  (InterlockedOr with 0 is used as an atomic load.)
 */
static ossl_inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = InterlockedOr(&lock->reader_idx, 0);
        InterlockedAdd64(&lock->qp_group[qp_idx].users, VAL_READER);
        if (qp_idx == InterlockedOr(&lock->reader_idx, 0))
            break;
        /* lost a race with a writer: undo the hold and retry */
        InterlockedAdd64(&lock->qp_group[qp_idx].users, -VAL_READER);
    }

    return &lock->qp_group[qp_idx];
}
186 |
|
---|
187 | static void ossl_rcu_free_local_data(void *arg)
|
---|
188 | {
|
---|
189 | OSSL_LIB_CTX *ctx = arg;
|
---|
190 | CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
|
---|
191 | struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
|
---|
192 | OPENSSL_free(data);
|
---|
193 | CRYPTO_THREAD_set_local(lkey, NULL);
|
---|
194 | }
|
---|
195 |
|
---|
196 | void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
|
---|
197 | {
|
---|
198 | struct rcu_thr_data *data;
|
---|
199 | int i;
|
---|
200 | int available_qp = -1;
|
---|
201 | CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
|
---|
202 |
|
---|
203 | /*
|
---|
204 | * we're going to access current_qp here so ask the
|
---|
205 | * processor to fetch it
|
---|
206 | */
|
---|
207 | data = CRYPTO_THREAD_get_local(lkey);
|
---|
208 |
|
---|
209 | if (data == NULL) {
|
---|
210 | data = OPENSSL_zalloc(sizeof(*data));
|
---|
211 | OPENSSL_assert(data != NULL);
|
---|
212 | CRYPTO_THREAD_set_local(lkey, data);
|
---|
213 | ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
|
---|
214 | }
|
---|
215 |
|
---|
216 | for (i = 0; i < MAX_QPS; i++) {
|
---|
217 | if (data->thread_qps[i].qp == NULL && available_qp == -1)
|
---|
218 | available_qp = i;
|
---|
219 | /* If we have a hold on this lock already, we're good */
|
---|
220 | if (data->thread_qps[i].lock == lock)
|
---|
221 | return;
|
---|
222 | }
|
---|
223 |
|
---|
224 | /*
|
---|
225 | * if we get here, then we don't have a hold on this lock yet
|
---|
226 | */
|
---|
227 | assert(available_qp != -1);
|
---|
228 |
|
---|
229 | data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
|
---|
230 | data->thread_qps[available_qp].depth = 1;
|
---|
231 | data->thread_qps[available_qp].lock = lock;
|
---|
232 | }
|
---|
233 |
|
---|
/* Serialize writers: only one thread may mutate rcu-protected data at a time */
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}
238 |
|
---|
/* Release the writer serialization taken by ossl_rcu_write_lock() */
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}
243 |
|
---|
/*
 * Drop one level of read-side hold on @lock for the calling thread.
 * When the nesting depth reaches zero the reader count on the pinned qp
 * is atomically decremented, which is what a writer spinning in
 * ossl_synchronize_rcu() is waiting for.
 */
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    /* unlock without a matching read_lock is a caller bug */
    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                /* last nested hold gone: release our count on the qp */
                ret = InterlockedAdd64(&data->thread_qps[i].qp->users, -VAL_READER);
                /* reader count must never go negative */
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}
266 |
|
---|
/*
 * Allocate the next qp for a writer and swing new readers onto the
 * following one.  Blocks until at least two qps are free so that
 * readers always have a qp that is not being waited on.  Returns the qp
 * the caller must wait out in ossl_synchronize_rcu().
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;
    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    /*
     * Clear the old generation id (high 32 bits) while preserving the
     * live reader count (low 32 bits), then install the new id; the id
     * is matched against next_to_retire in ossl_synchronize_rcu().
     */
    InterlockedAnd64(&lock->qp_group[current_idx].users, ID_MASK);
    InterlockedAdd64(&lock->qp_group[current_idx].users, new_id);

    /* update the reader index to be the prior qp */
    tmp = lock->current_alloc_idx;
    InterlockedExchange(&lock->reader_idx, tmp);

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}
307 |
|
---|
/*
 * Hand a qp back to the pool after the writer has finished waiting on
 * it, and wake any writers blocked in update_qp() waiting for a free
 * qp.  (No per-qp cleanup is needed, so @qp itself is not touched.)
 */
static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}
316 |
|
---|
317 |
|
---|
/*
 * Wait until all readers that could be seeing the pre-update data have
 * dropped their holds, then run (and free) any callbacks registered via
 * ossl_rcu_call().  Writers retire in generation order, coordinated
 * through prior_lock/prior_signal.
 */
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, lets grab the cb list */
    cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items, NULL);

    /* claim a qp and move new readers onto the next one */
    qp = update_qp(lock);

    /* wait for the reader count to reach zero */
    do {
        /* InterlockedOr64 with 0 acts as an atomic 64-bit load */
        count = InterlockedOr64(&qp->users, 0);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }

    /* and we're done */
    return;

}
357 |
|
---|
358 | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
|
---|
359 | {
|
---|
360 | struct rcu_cb_item *new;
|
---|
361 | struct rcu_cb_item *prev;
|
---|
362 |
|
---|
363 | new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
|
---|
364 | if (new == NULL)
|
---|
365 | return 0;
|
---|
366 | prev = new;
|
---|
367 | new->data = data;
|
---|
368 | new->fn = cb;
|
---|
369 |
|
---|
370 | InterlockedExchangePointer((void * volatile *)&lock->cb_items, prev);
|
---|
371 | new->next = prev;
|
---|
372 | return 1;
|
---|
373 | }
|
---|
374 |
|
---|
/*
 * Read-side dereference of an rcu-managed pointer.  A plain load is
 * used here; writers publish through ossl_rcu_assign_uptr().
 */
void *ossl_rcu_uptr_deref(void **p)
{
    return *p;
}
379 |
|
---|
/* Publish *v into *p atomically so concurrent readers see old or new,
 * never a torn value */
void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}
384 |
|
---|
385 |
|
---|
/*
 * Allocate and initialize a read/write lock.  On Vista+ (USE_RWLOCK)
 * this is a slim reader/writer lock; otherwise a critical section
 * (which is exclusive-only).  Returns NULL on failure without raising
 * an error, since the error machinery itself may need locks.
 */
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else

    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}
416 |
|
---|
/* Acquire @lock for reading (shared on SRW; exclusive on the critical
 * section fallback, which has no shared mode).  Always returns 1. */
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
428 |
|
---|
/* Acquire @lock for writing.  The exclusive flag tells
 * CRYPTO_THREAD_unlock() which SRW release call to use.  Always
 * returns 1. */
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
441 |
|
---|
/* Release @lock.  For SRW locks the exclusive flag (set by
 * CRYPTO_THREAD_write_lock) selects the matching release call; it is
 * cleared before release so the next holder sees consistent state.
 * Always returns 1. */
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}
458 |
|
---|
459 | void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
|
---|
460 | {
|
---|
461 | if (lock == NULL)
|
---|
462 | return;
|
---|
463 |
|
---|
464 | # ifndef USE_RWLOCK
|
---|
465 | DeleteCriticalSection(lock);
|
---|
466 | # endif
|
---|
467 | OPENSSL_free(lock);
|
---|
468 |
|
---|
469 | return;
|
---|
470 | }
|
---|
471 |
|
---|
/* States of the once-control word */
# define ONCE_UNINITED 0    /* init() not yet started */
# define ONCE_ININIT 1      /* another thread is running init() */
# define ONCE_DONE 2        /* init() has completed */

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP which
 * we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    /* fast path: already initialized */
    if (*lock == ONCE_DONE)
        return 1;

    do {
        /* try to claim the init; losers spin while the winner runs init() */
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}
499 |
|
---|
500 | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
|
---|
501 | {
|
---|
502 | *key = TlsAlloc();
|
---|
503 | if (*key == TLS_OUT_OF_INDEXES)
|
---|
504 | return 0;
|
---|
505 |
|
---|
506 | return 1;
|
---|
507 | }
|
---|
508 |
|
---|
/* Fetch this thread's value for @key, preserving GetLastError() state. */
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}
532 |
|
---|
533 | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
|
---|
534 | {
|
---|
535 | if (TlsSetValue(*key, val) == 0)
|
---|
536 | return 0;
|
---|
537 |
|
---|
538 | return 1;
|
---|
539 | }
|
---|
540 |
|
---|
541 | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
|
---|
542 | {
|
---|
543 | if (TlsFree(*key) == 0)
|
---|
544 | return 0;
|
---|
545 |
|
---|
546 | return 1;
|
---|
547 | }
|
---|
548 |
|
---|
/* Identify the calling thread via its Windows thread id */
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}
553 |
|
---|
554 | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
|
---|
555 | {
|
---|
556 | return (a == b);
|
---|
557 | }
|
---|
558 |
|
---|
/*
 * Atomically add @amount to *val, storing the post-add value in *ret.
 * InterlockedExchangeAdd returns the PRE-add value, hence the + amount.
 * The lock argument is unused: the interlocked op suffices on all
 * supported compilers.  Always returns 1.
 */
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
    *ret = (int)InterlockedExchangeAdd((long volatile *)val, (long)amount) + amount;
    return 1;
}
564 |
|
---|
/*
 * Atomically OR @op into *val, storing the post-OR value in *ret.
 * On VC++ 2008-or-earlier x86 (NO_INTERLOCKEDOR64) there is no 64-bit
 * interlocked OR, so fall back to the supplied rwlock; otherwise
 * InterlockedOr64 returns the PRE-OR value, hence the "| op".
 * Returns 1 on success, 0 if the lock fallback fails.
 */
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
#endif
}
583 |
|
---|
/*
 * Atomically load the 64-bit *val into *ret.  Without 64-bit
 * interlocked ops (NO_INTERLOCKEDOR64) a read lock guards the load;
 * otherwise InterlockedOr64 with 0 acts as an atomic load.
 * Returns 1 on success, 0 if the lock fallback fails.
 */
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
#endif
}
599 |
|
---|
/*
 * Atomically load the int *val into *ret, mirroring
 * CRYPTO_atomic_load() but for int-width values.  InterlockedOr with 0
 * acts as an atomic load.  Returns 1 on success, 0 if the lock
 * fallback fails.
 */
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* On Windows, LONG is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
#endif
}
616 |
|
---|
/* Windows has no fork(), so there are no fork handlers to register */
int openssl_init_fork_handlers(void)
{
    return 0;
}
621 |
|
---|
/* No fork() on Windows: the fork generation id is always 0 */
int openssl_get_fork_id(void)
{
    return 0;
}
626 | #endif
|
---|