VirtualBox

source: vbox/trunk/src/libs/openssl-3.3.2/crypto/modes/ocb128.c@ 108358

Last change on this file since 108358 was 108206, checked in by vboxsync, 6 weeks ago

openssl-3.3.2: Exported all files to OSE and removed .scm-settings bugref:10757

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.2 KB
 
/*
 * Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include "crypto/modes.h"

#ifndef OPENSSL_NO_OCB

/*
 * Calculate the number of binary trailing zeros in any given number
 */
static u32 ocb_ntz(u64 n)
{
    u32 cnt = 0;

    /*
     * We do a right-to-left simple sequential search. This is surprisingly
     * efficient as the distribution of trailing zeros is not uniform,
     * e.g. the number of possible inputs with no trailing zeros is equal to
     * the number with 1 or more; the number with exactly 1 is equal to the
     * number with 2 or more, etc. Checking the last two bits covers 75% of
     * all numbers. Checking the last three covers 87.5%.
     */
    while (!(n & 1)) {
        n >>= 1;
        cnt++;
    }
    return cnt;
}
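As an editorial illustration (not part of ocb128.c), the standalone sketch below mirrors the loop above and prints which precomputed L_{ntz(i)} entry each block index selects; the helper name ntz() and the main() wrapper are purely illustrative.

/* Illustrative only -- standalone sketch, not part of this file. */
#include <stdio.h>
#include <stdint.h>

static unsigned int ntz(uint64_t n)
{
    unsigned int cnt = 0;

    while (!(n & 1)) {      /* same right-to-left scan as ocb_ntz() above */
        n >>= 1;
        cnt++;
    }
    return cnt;
}

int main(void)
{
    uint64_t i;

    /* Blocks 1..8 use L_0, L_1, L_0, L_2, L_0, L_1, L_0, L_3 */
    for (i = 1; i <= 8; i++)
        printf("block %llu -> L_%u\n", (unsigned long long)i, ntz(i));
    return 0;
}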

/*
 * Shift a block of 16 bytes left by shift bits
 */
static void ocb_block_lshift(const unsigned char *in, size_t shift,
                             unsigned char *out)
{
    int i;
    unsigned char carry = 0, carry_next;

    for (i = 15; i >= 0; i--) {
        carry_next = in[i] >> (8 - shift);
        out[i] = (in[i] << shift) | carry;
        carry = carry_next;
    }
}

/*
 * Perform a "double" operation as per OCB spec
 */
static void ocb_double(OCB_BLOCK *in, OCB_BLOCK *out)
{
    unsigned char mask;

    /*
     * Calculate the mask based on the most significant bit. There are more
     * efficient ways to do this - but this way is constant time
     */
    mask = in->c[0] & 0x80;
    mask >>= 7;
    mask = (0 - mask) & 0x87;

    ocb_block_lshift(in->c, 1, out->c);

    out->c[15] ^= mask;
}
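The following standalone sketch (illustrative, not part of this file) shows the constant-time mask trick used by ocb_double(): the GF(2^128) reduction byte 0x87 is folded in exactly when the most significant bit of the block is set, without a data-dependent branch.

/* Illustrative only -- standalone sketch, not part of this file. */
#include <stdio.h>

int main(void)
{
    unsigned char msb_set = 0x80, msb_clear = 0x7f;
    unsigned char mask;

    mask = (unsigned char)(0 - ((msb_set & 0x80) >> 7)) & 0x87;
    printf("MSB set   -> mask 0x%02x\n", mask);   /* prints 0x87 */

    mask = (unsigned char)(0 - ((msb_clear & 0x80) >> 7)) & 0x87;
    printf("MSB clear -> mask 0x%02x\n", mask);   /* prints 0x00 */
    return 0;
}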

/*
 * Perform an xor on in1 and in2 - each of len bytes. Store result in out
 */
static void ocb_block_xor(const unsigned char *in1,
                          const unsigned char *in2, size_t len,
                          unsigned char *out)
{
    size_t i;
    for (i = 0; i < len; i++) {
        out[i] = in1[i] ^ in2[i];
    }
}

/*
 * Look up L_index in our lookup table. If we haven't already got it we need to
 * calculate it
 */
static OCB_BLOCK *ocb_lookup_l(OCB128_CONTEXT *ctx, size_t idx)
{
    size_t l_index = ctx->l_index;

    if (idx <= l_index) {
        return ctx->l + idx;
    }

    /* We don't have it - so calculate it */
    if (idx >= ctx->max_l_index) {
        void *tmp_ptr;
        /*
         * Each additional entry allows us to process almost twice as much
         * data, so in a linear world the table needs to be expanded with
         * smaller and smaller increments. Originally it doubled in size,
         * which was a waste. Growing it linearly is not formally optimal,
         * but is simpler to implement. We grow the table by the minimal
         * multiple of 4 that accommodates the index.
         */
        ctx->max_l_index += (idx - ctx->max_l_index + 4) & ~3;
        tmp_ptr = OPENSSL_realloc(ctx->l, ctx->max_l_index * sizeof(OCB_BLOCK));
        if (tmp_ptr == NULL) /* prevent ctx->l from being clobbered */
            return NULL;
        ctx->l = tmp_ptr;
    }
    while (l_index < idx) {
        ocb_double(ctx->l + l_index, ctx->l + l_index + 1);
        l_index++;
    }
    ctx->l_index = l_index;

    return ctx->l + idx;
}
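To make the growth arithmetic concrete, here is a minimal standalone sketch (not part of this file) that reproduces the expression above for a hypothetical lookup index of 9 against the initial table size of 5 set by CRYPTO_ocb128_init().

/* Illustrative only -- standalone sketch, not part of this file. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t max_l_index = 5;   /* initial table size from CRYPTO_ocb128_init() */
    size_t idx = 9;           /* hypothetical lookup index */

    /* grows by (9 - 5 + 4) & ~3 = 8 entries */
    max_l_index += (idx - max_l_index + 4) & ~(size_t)3;
    printf("new max_l_index = %zu\n", max_l_index);   /* prints 13 */
    return 0;
}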

/*
 * Create a new OCB128_CONTEXT
 */
OCB128_CONTEXT *CRYPTO_ocb128_new(void *keyenc, void *keydec,
                                  block128_f encrypt, block128_f decrypt,
                                  ocb128_f stream)
{
    OCB128_CONTEXT *octx;
    int ret;

    if ((octx = OPENSSL_malloc(sizeof(*octx))) != NULL) {
        ret = CRYPTO_ocb128_init(octx, keyenc, keydec, encrypt, decrypt,
                                 stream);
        if (ret)
            return octx;
        OPENSSL_free(octx);
    }

    return NULL;
}

/*
 * Initialise an existing OCB128_CONTEXT
 */
int CRYPTO_ocb128_init(OCB128_CONTEXT *ctx, void *keyenc, void *keydec,
                       block128_f encrypt, block128_f decrypt,
                       ocb128_f stream)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->l_index = 0;
    ctx->max_l_index = 5;
    if ((ctx->l = OPENSSL_malloc(ctx->max_l_index * 16)) == NULL)
        return 0;

    /*
     * We set both the encryption and decryption key schedules - decryption
     * needs both. We don't really need the decryption schedule if we are only
     * encrypting - but it simplifies things to take it anyway
     */
    ctx->encrypt = encrypt;
    ctx->decrypt = decrypt;
    ctx->stream = stream;
    ctx->keyenc = keyenc;
    ctx->keydec = keydec;

    /* L_* = ENCIPHER(K, zeros(128)) */
    ctx->encrypt(ctx->l_star.c, ctx->l_star.c, ctx->keyenc);

    /* L_$ = double(L_*) */
    ocb_double(&ctx->l_star, &ctx->l_dollar);

    /* L_0 = double(L_$) */
    ocb_double(&ctx->l_dollar, ctx->l);

    /* L_{i} = double(L_{i-1}) */
    ocb_double(ctx->l, ctx->l + 1);
    ocb_double(ctx->l + 1, ctx->l + 2);
    ocb_double(ctx->l + 2, ctx->l + 3);
    ocb_double(ctx->l + 3, ctx->l + 4);
    ctx->l_index = 4; /* enough to process up to 496 bytes */

    return 1;
}
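A quick editorial check of the "496 bytes" comment above, as a standalone sketch (not part of this file): with L_0 through L_4 precomputed, every block index up to 31 needs at most L_4 (index 32 is the first to require L_5), and 31 blocks of 16 bytes is 496 bytes.

/* Illustrative only -- standalone sketch, not part of this file. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t i;
    unsigned int max_ntz = 0;

    for (i = 1; i <= 31; i++) {
        uint64_t n = i;
        unsigned int cnt = 0;

        while (!(n & 1)) {      /* trailing-zero count, as in ocb_ntz() */
            n >>= 1;
            cnt++;
        }
        if (cnt > max_ntz)
            max_ntz = cnt;
    }
    printf("max ntz(i) for i in 1..31 is %u; 31 * 16 = %d bytes\n",
           max_ntz, 31 * 16);
    return 0;
}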

/*
 * Copy an OCB128_CONTEXT object
 */
int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT *dest, OCB128_CONTEXT *src,
                           void *keyenc, void *keydec)
{
    memcpy(dest, src, sizeof(OCB128_CONTEXT));
    if (keyenc)
        dest->keyenc = keyenc;
    if (keydec)
        dest->keydec = keydec;
    if (src->l) {
        if ((dest->l = OPENSSL_malloc(src->max_l_index * 16)) == NULL)
            return 0;
        memcpy(dest->l, src->l, (src->l_index + 1) * 16);
    }
    return 1;
}

/*
 * Set the IV to be used for this operation. Must be 1 - 15 bytes.
 */
int CRYPTO_ocb128_setiv(OCB128_CONTEXT *ctx, const unsigned char *iv,
                        size_t len, size_t taglen)
{
    unsigned char ktop[16], tmp[16], mask;
    unsigned char stretch[24], nonce[16];
    size_t bottom, shift;

    /*
     * The spec says the IV is 120 bits or fewer - it allows non-byte-aligned
     * lengths. We don't support this at this stage
     */
    if ((len > 15) || (len < 1) || (taglen > 16) || (taglen < 1)) {
        return -1;
    }

    /* Reset nonce-dependent variables */
    memset(&ctx->sess, 0, sizeof(ctx->sess));

    /* Nonce = num2str(TAGLEN mod 128,7) || zeros(120-bitlen(N)) || 1 || N */
    nonce[0] = ((taglen * 8) % 128) << 1;
    memset(nonce + 1, 0, 15);
    memcpy(nonce + 16 - len, iv, len);
    nonce[15 - len] |= 1;

    /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */
    memcpy(tmp, nonce, 16);
    tmp[15] &= 0xc0;
    ctx->encrypt(tmp, ktop, ctx->keyenc);

    /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
    memcpy(stretch, ktop, 16);
    ocb_block_xor(ktop, ktop + 1, 8, stretch + 16);

    /* bottom = str2num(Nonce[123..128]) */
    bottom = nonce[15] & 0x3f;

    /* Offset_0 = Stretch[1+bottom..128+bottom] */
    shift = bottom % 8;
    ocb_block_lshift(stretch + (bottom / 8), shift, ctx->sess.offset.c);
    mask = 0xff;
    mask <<= 8 - shift;
    ctx->sess.offset.c[15] |=
        (*(stretch + (bottom / 8) + 16) & mask) >> (8 - shift);

    return 1;
}
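For illustration only, the sketch below rebuilds the 16-byte Nonce block exactly as the lines above do, for a hypothetical 12-byte all-zero IV and a 16-byte tag; it is not part of this file.

/* Illustrative only -- standalone sketch, not part of this file. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char iv[12] = { 0 };   /* hypothetical all-zero IV */
    unsigned char nonce[16];
    size_t len = sizeof(iv), taglen = 16, i;

    nonce[0] = (unsigned char)(((taglen * 8) % 128) << 1); /* 0x00 for a 128-bit tag */
    memset(nonce + 1, 0, 15);
    memcpy(nonce + 16 - len, iv, len);
    nonce[15 - len] |= 1;           /* the separator bit lands in nonce[3] */

    for (i = 0; i < 16; i++)
        printf("%02x", nonce[i]);
    printf("\n");                   /* 00000001 followed by the 12 IV bytes */
    return 0;
}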

/*
 * Provide any AAD. This can be called multiple times. Only the final time can
 * have a partial block
 */
int CRYPTO_ocb128_aad(OCB128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;
    OCB_BLOCK tmp;

    /* Calculate the number of blocks of AAD provided now, and so far */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_hashed;

    /* Loop through all full blocks of AAD */
    for (i = ctx->sess.blocks_hashed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *lookup;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (lookup == NULL)
            return 0;
        ocb_block16_xor(&ctx->sess.offset_aad, lookup, &ctx->sess.offset_aad);

        memcpy(tmp.c, aad, 16);
        aad += 16;

        /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
        ocb_block16_xor(&ctx->sess.offset_aad, &tmp, &tmp);
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sess.sum, &ctx->sess.sum);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset_aad, &ctx->l_star,
                        &ctx->sess.offset_aad);

        /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
        memset(tmp.c, 0, 16);
        memcpy(tmp.c, aad, last_len);
        tmp.c[last_len] = 0x80;
        ocb_block16_xor(&ctx->sess.offset_aad, &tmp, &tmp);

        /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sess.sum, &ctx->sess.sum);
    }

    ctx->sess.blocks_hashed = all_num_blocks;

    return 1;
}

/*
 * Provide any data to be encrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be encrypted provided now, and
     * so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process data at hand
         * and pre-compute missing entries in the table [if any]...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keyenc,
                    (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c);
    } else {
        /* Loop through all full blocks to be encrypted */
        for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) {
            OCB_BLOCK *lookup;
            OCB_BLOCK tmp;

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            lookup = ocb_lookup_l(ctx, ocb_ntz(i));
            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->sess.offset, lookup, &ctx->sess.offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->sess.checksum, &ctx->sess.checksum);

            /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);
            ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset, &ctx->l_star, &ctx->sess.offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->sess.offset.c, pad.c, ctx->keyenc);

        /* C_* = P_* xor Pad[1..bitlen(P_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16); /* borrow pad */
        memcpy(pad.c, in, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->sess.checksum, &ctx->sess.checksum);
    }

    ctx->sess.blocks_processed = all_num_blocks;

    return 1;
}

/*
 * Provide any data to be decrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be decrypted provided now, and
     * so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->sess.blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process data at hand
         * and pre-compute missing entries in the table [if any]...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keydec,
                    (size_t)ctx->sess.blocks_processed + 1, ctx->sess.offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->sess.checksum.c);
    } else {
        OCB_BLOCK tmp;

        /* Loop through all full blocks to be decrypted */
        for (i = ctx->sess.blocks_processed + 1; i <= all_num_blocks; i++) {

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            OCB_BLOCK *lookup = ocb_lookup_l(ctx, ocb_ntz(i));
            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->sess.offset, lookup, &ctx->sess.offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);
            ctx->decrypt(tmp.c, tmp.c, ctx->keydec);
            ocb_block16_xor(&ctx->sess.offset, &tmp, &tmp);

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->sess.checksum, &ctx->sess.checksum);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in the
     * last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->sess.offset, &ctx->l_star, &ctx->sess.offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->sess.offset.c, pad.c, ctx->keyenc);

        /* P_* = C_* xor Pad[1..bitlen(C_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16); /* borrow pad */
        memcpy(pad.c, out, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->sess.checksum, &ctx->sess.checksum);
    }

    ctx->sess.blocks_processed = all_num_blocks;

    return 1;
}

static int ocb_finish(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len,
                      int write)
{
    OCB_BLOCK tmp;

    if (len > 16 || len < 1) {
        return -1;
    }

    /*
     * Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K,A)
     */
    ocb_block16_xor(&ctx->sess.checksum, &ctx->sess.offset, &tmp);
    ocb_block16_xor(&ctx->l_dollar, &tmp, &tmp);
    ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
    ocb_block16_xor(&tmp, &ctx->sess.sum, &tmp);

    if (write) {
        memcpy(tag, &tmp, len);
        return 1;
    } else {
        return CRYPTO_memcmp(&tmp, tag, len);
    }
}

/*
 * Calculate the tag and verify it against the supplied tag
 */
int CRYPTO_ocb128_finish(OCB128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    return ocb_finish(ctx, (unsigned char *)tag, len, 0);
}

/*
 * Retrieve the calculated tag
 */
int CRYPTO_ocb128_tag(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    return ocb_finish(ctx, tag, len, 1);
}

/*
 * Release all resources
 */
void CRYPTO_ocb128_cleanup(OCB128_CONTEXT *ctx)
{
    if (ctx) {
        OPENSSL_clear_free(ctx->l, ctx->max_l_index * 16);
        OPENSSL_cleanse(ctx, sizeof(*ctx));
    }
}

#endif /* OPENSSL_NO_OCB */
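For orientation, here is a minimal usage sketch of this internal API, loosely modelled on how OpenSSL's EVP layer drives it: it assumes raw AES from <openssl/aes.h> as the block cipher, passes NULL for the stream function, and reduces error handling to return-value checks. It is illustrative only and not part of this file; the key, IV, and ocb_demo() wrapper are hypothetical.

/* Illustrative only -- standalone sketch, not part of this file. */
#include <string.h>
#include <openssl/aes.h>
#include "crypto/modes.h"

int ocb_demo(void)
{
    static const unsigned char key[16] = { 0 };   /* hypothetical key */
    static const unsigned char iv[12] = { 0 };    /* hypothetical 96-bit IV */
    unsigned char pt[32] = "hello, OCB mode", ct[32], tag[16];
    AES_KEY kenc, kdec;
    OCB128_CONTEXT ctx;
    int ok = 1;

    AES_set_encrypt_key(key, 128, &kenc);
    AES_set_decrypt_key(key, 128, &kdec);

    /* block cipher callbacks are cast to block128_f, as OpenSSL does internally */
    ok &= CRYPTO_ocb128_init(&ctx, &kenc, &kdec,
                             (block128_f)AES_encrypt,
                             (block128_f)AES_decrypt, NULL);
    ok &= CRYPTO_ocb128_setiv(&ctx, iv, sizeof(iv), sizeof(tag)) > 0;
    ok &= CRYPTO_ocb128_aad(&ctx, (const unsigned char *)"header", 6);
    ok &= CRYPTO_ocb128_encrypt(&ctx, pt, ct, sizeof(pt));
    ok &= CRYPTO_ocb128_tag(&ctx, tag, sizeof(tag));

    CRYPTO_ocb128_cleanup(&ctx);
    return ok;
}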