VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.5/crypto/evp/e_aes.c@ 105132

Last change on this file since 105132 was 104078, checked in by vboxsync, 12 months ago

openssl-3.1.5: Applied and adjusted our OpenSSL changes to 3.1.4. bugref:10638

File size: 133.5 KB
 
/*
 * Copyright 2001-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low-level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK     ((size_t)1 << (sizeof(size_t) * 8 - 4))

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
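
/*-
 * Worked example (illustrative, not part of the upstream file): ctr64_inc()
 * treats the eight bytes of its argument as one big-endian integer.
 * Starting from
 *     counter = { 0, 0, 0, 0, 0, 0, 0, 0xff }
 * a single call yields
 *     counter = { 0, 0, 0, 0, 0, 0, 1, 0x00 }
 * because the carry loop stops at the first byte that does not wrap to
 * zero; in the common case only counter[7] is touched.
 */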

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx)      (gctx->gcm.block==(block128_f)aesni_encrypt && \
                                   gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    mode = EVP_CIPHER_CTX_get_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}
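
/*-
 * Usage sketch (a minimal illustration, not upstream code): this key
 * schedule routine is never called directly; it is reached through the
 * generic EVP front end, roughly as follows.
 */
#if 0
static int aes128_cbc_encrypt_once(const unsigned char key[16],
                                   const unsigned char iv[16],
                                   const unsigned char *pt, int ptlen,
                                   unsigned char *ct) /* >= ptlen + 16 bytes */
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl = 0, fin = 0, ok;

    /* EVP_EncryptInit_ex() ends up in aesni_init_key() on AES-NI systems. */
    ok = c != NULL
        && EVP_EncryptInit_ex(c, EVP_aes_128_cbc(), NULL, key, iv)
        && EVP_EncryptUpdate(c, ct, &outl, pt, ptlen)
        && EVP_EncryptFinal_ex(c, ct + outl, &fin);
    EVP_CIPHER_CTX_free(c);
    return ok ? outl + fin : -1;
}
#endif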

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      ctx->iv, EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX, ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aesni_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

#  define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aesni_init_key,                 \
        aesni_##mode##_cipher,          \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_init_key,                   \
        aes_##mode##_cipher,            \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
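
/*-
 * Expansion sketch (illustrative): an instantiation such as
 *     BLOCK_CIPHER_generic(NID_aes, 128, 16, 16, cbc, cbc, CBC, flags)
 * defines the two cipher tables aesni_128_cbc and aes_128_cbc together with
 *     const EVP_CIPHER *EVP_aes_128_cbc(void)
 * which selects the AES-NI table when AESNI_CAPABLE is true at run time and
 * falls back to the generic implementation otherwise.
 */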

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aesni_##mode##_init_key,        \
        aesni_##mode##_cipher,          \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen,                          \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_##mode##_init_key,          \
        aes_##mode##_cipher,            \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    if (bits <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;
    if (key) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

#  define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                        /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_t4_init_key,                \
        aes_t4_##mode##_cipher,         \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_init_key,                   \
        aes_##mode##_cipher,            \
        NULL,                           \
        sizeof(EVP_AES_KEY),            \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_t4_##mode##_init_key,       \
        aes_t4_##mode##_cipher,         \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE,   \
        EVP_ORIG_GLOBAL,                \
        aes_##mode##_init_key,          \
        aes_##mode##_cipher,            \
        aes_##mode##_cleanup,           \
        sizeof(EVP_AES_##MODE##_CTX),   \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;

# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    memcpy(iv, cctx->kmo.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;   /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = ctx->num;
    int rem;
    unsigned char tmp;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    memcpy(iv, cctx->kmf.param.cv, ivlen);
    ctx->num = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;   /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    memcpy(iv, cctx->kmf.param.cv, ivlen);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i)  ((((i) + 15) >> 4 << 4) + 16)
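
/*-
 * Worked example (illustrative): the iv is padded up to a multiple of 16
 * bytes and one extra block is reserved for the 128-bit length field, so
 * S390X_gcm_ivpadlen(13) = 32, S390X_gcm_ivpadlen(16) = 32 and
 * S390X_gcm_ivpadlen(17) = 48.
 */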

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
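
/*-
 * Note (editorial, per NIST SP 800-38D): for a 96-bit iv the pre-counter
 * block is J0 = IV || 0^31 || 1, which is what j0.w[3] = 1 sets up above;
 * for any other iv length J0 = GHASH(IV padded to a block boundary ||
 * [len(IV)]_64), which the kma pass over the padded ctx->iv computes.
 */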

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = c->iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != c->iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);

        if (gctx->iv == c->iv) {
            gctx_out->iv = out->iv;
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }

        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
    if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}
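
/*-
 * Record layout handled above (TLS 1.2 AEAD, for orientation):
 *
 *     explicit_iv (8) || ciphertext (len) || tag (16)
 *
 * so the caller passes len = 8 + plaintext_len + 16 and, on the encrypt
 * side, gets that same total back as the return value.
 */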

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_is_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);

    if (gctx == NULL)
        return 0;

    if (gctx->iv != c->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
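
/*-
 * Note (editorial): the block assembled here is B0 from RFC 3610: one flags
 * byte, the (15 - L)-byte nonce and the message length encoded big-endian
 * in the trailing bytes, with the AAD flag cleared until
 * s390x_aes_ccm_aad() sets it.
 */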
1802
1803/*-
1804 * Process additional authenticated data. Code is big-endian.
1805 */
1806static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
1807 size_t alen)
1808{
1809 unsigned char *ptr;
1810 int i, rem;
1811
1812 if (!alen)
1813 return;
1814
1815 ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;
1816
1817 /* Suppress 'type-punned pointer dereference' warning. */
1818 ptr = ctx->aes.ccm.buf.b;
1819
1820 if (alen < ((1 << 16) - (1 << 8))) {
1821 *(uint16_t *)ptr = alen;
1822 i = 2;
1823 } else if (sizeof(alen) == 8
1824 && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
1825 *(uint16_t *)ptr = 0xffff;
1826 *(uint64_t *)(ptr + 2) = alen;
1827 i = 10;
1828 } else {
1829 *(uint16_t *)ptr = 0xfffe;
1830 *(uint32_t *)(ptr + 2) = alen;
1831 i = 6;
1832 }
1833
1834 while (i < 16 && alen) {
1835 ctx->aes.ccm.buf.b[i] = *aad;
1836 ++aad;
1837 --alen;
1838 ++i;
1839 }
1840 while (i < 16) {
1841 ctx->aes.ccm.buf.b[i] = 0;
1842 ++i;
1843 }
1844
1845 ctx->aes.ccm.kmac_param.icv.g[0] = 0;
1846 ctx->aes.ccm.kmac_param.icv.g[1] = 0;
1847 s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
1848 &ctx->aes.ccm.kmac_param);
1849 ctx->aes.ccm.blocks += 2;
1850
1851 rem = alen & 0xf;
1852 alen &= ~(size_t)0xf;
1853 if (alen) {
1854 s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
1855 ctx->aes.ccm.blocks += alen >> 4;
1856 aad += alen;
1857 }
1858 if (rem) {
1859 for (i = 0; i < rem; i++)
1860 ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];
1861
1862 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
1863 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
1864 ctx->aes.ccm.kmac_param.k);
1865 ctx->aes.ccm.blocks++;
1866 }
1867}
1868
1869/*-
1870 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
1871 * success.
1872 */
1873static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
1874 unsigned char *out, size_t len, int enc)
1875{
1876 size_t n, rem;
1877 unsigned int i, l, num;
1878 unsigned char flags;
1879
1880 flags = ctx->aes.ccm.nonce.b[0];
1881 if (!(flags & S390X_CCM_AAD_FLAG)) {
1882 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
1883 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
1884 ctx->aes.ccm.blocks++;
1885 }
1886 l = flags & 0x7;
1887 ctx->aes.ccm.nonce.b[0] = l;
1888
1889 /*-
1890 * Reconstruct length from encoded length field
1891 * and initialize it with counter value.
1892 */
1893 n = 0;
1894 for (i = 15 - l; i < 15; i++) {
1895 n |= ctx->aes.ccm.nonce.b[i];
1896 ctx->aes.ccm.nonce.b[i] = 0;
1897 n <<= 8;
1898 }
1899 n |= ctx->aes.ccm.nonce.b[15];
1900 ctx->aes.ccm.nonce.b[15] = 1;
1901
1902 if (n != len)
1903 return -1; /* length mismatch */
1904
1905 if (enc) {
1906 /* Two operations per block plus one for tag encryption */
1907 ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
1908 if (ctx->aes.ccm.blocks > (1ULL << 61))
1909 return -2; /* too much data */
1910 }
1911
1912 num = 0;
1913 rem = len & 0xf;
1914 len &= ~(size_t)0xf;
1915
1916 if (enc) {
1917 /* mac-then-encrypt */
1918 if (len)
1919 s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
1920 if (rem) {
1921 for (i = 0; i < rem; i++)
1922 ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];
1923
1924 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
1925 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
1926 ctx->aes.ccm.kmac_param.k);
1927 }
1928
1929 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
1930 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
1931 &num, (ctr128_f)AES_ctr32_encrypt);
1932 } else {
1933 /* decrypt-then-mac */
1934 CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
1935 ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
1936 &num, (ctr128_f)AES_ctr32_encrypt);
1937
1938 if (len)
1939 s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
1940 if (rem) {
1941 for (i = 0; i < rem; i++)
1942 ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];
1943
1944 s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
1945 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
1946 ctx->aes.ccm.kmac_param.k);
1947 }
1948 }
1949 /* encrypt tag */
1950 for (i = 15 - l; i < 16; i++)
1951 ctx->aes.ccm.nonce.b[i] = 0;
1952
1953 s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
1954 ctx->aes.ccm.kmac_param.k);
1955 ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
1956 ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];
1957
1958 ctx->aes.ccm.nonce.b[0] = flags; /* restore flags field */
1959 return 0;
1960}
1961
1962/*-
1963 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
1964 * if successful. Otherwise -1 is returned.
1965 */
1966static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
1967 const unsigned char *in, size_t len)
1968{
1969 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
1970 unsigned char *ivec = ctx->iv;
1971 unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
1972 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
1973
1974 if (out != in
1975 || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
1976 return -1;
1977
1978 if (enc) {
1979 /* Set explicit iv (sequence number). */
1980 memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1981 }
1982
1983 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
1984 /*-
1985 * Get explicit iv (sequence number). We already have fixed iv
1986 * (server/client_write_iv) here.
1987 */
1988 memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
1989 s390x_aes_ccm_setiv(cctx, ivec, len);
1990
1991 /* Process aad (sequence number|type|version|length) */
1992 s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);
1993
1994 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1995 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
1996
1997 if (enc) {
1998 if (s390x_aes_ccm(cctx, in, out, len, enc))
1999 return -1;
2000
2001 memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2002 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
2003 } else {
2004 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2005 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
2006 cctx->aes.ccm.m))
2007 return len;
2008 }
2009
2010 OPENSSL_cleanse(out, len);
2011 return -1;
2012 }
2013}
2014
2015/*-
2016 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
2017 * returned.
2018 */
2019static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
2020 const unsigned char *key,
2021 const unsigned char *iv, int enc)
2022{
2023 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2024 int keylen;
2025
2026 if (iv == NULL && key == NULL)
2027 return 1;
2028
2029 if (key != NULL) {
2030 keylen = EVP_CIPHER_CTX_get_key_length(ctx);
2031 if (keylen <= 0) {
2032 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2033 return 0;
2034 }
2035
2036 cctx->aes.ccm.fc = S390X_AES_FC(keylen);
2037 memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
2038
2039 /* Store encoded m and l. */
2040 cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
2041 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
2042 memset(cctx->aes.ccm.nonce.b + 1, 0,
2043 sizeof(cctx->aes.ccm.nonce.b) - 1);
2044 cctx->aes.ccm.blocks = 0;
2045
2046 cctx->aes.ccm.key_set = 1;
2047 }
2048
2049 if (iv != NULL) {
2050 memcpy(ctx->iv, iv, 15 - cctx->aes.ccm.l);
2051
2052 cctx->aes.ccm.iv_set = 1;
2053 }
2054
2055 return 1;
2056}
2057
2058/*-
2059 * Called from EVP layer to initialize context, process additional
2060 * authenticated data, en/de-crypt plain/cipher-text and authenticate
2061 * plaintext or process a TLS packet, depending on context. Returns bytes
2062 * written on success. Otherwise -1 is returned.
2063 */
2064static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2065 const unsigned char *in, size_t len)
2066{
2067 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2068 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
2069 int rv;
2070 unsigned char *buf;
2071
2072 if (!cctx->aes.ccm.key_set)
2073 return -1;
2074
2075 if (cctx->aes.ccm.tls_aad_len >= 0)
2076 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
2077
2078 /*-
2079 * Final(): Does not return any data. Recall that CCM is mac-then-encrypt,
2080 * so integrity must already have been checked at Update() time, i.e.
2081 * before potentially corrupted data is output.
2082 */
2083 if (in == NULL && out != NULL)
2084 return 0;
2085
2086 if (!cctx->aes.ccm.iv_set)
2087 return -1;
2088
2089 if (out == NULL) {
2090 /* Update(): Pass message length. */
2091 if (in == NULL) {
2092 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2093
2094 cctx->aes.ccm.len_set = 1;
2095 return len;
2096 }
2097
2098 /* Update(): Process aad. */
2099 if (!cctx->aes.ccm.len_set && len)
2100 return -1;
2101
2102 s390x_aes_ccm_aad(cctx, in, len);
2103 return len;
2104 }
2105
2106 /* The tag must be set before actually decrypting data */
2107 if (!enc && !cctx->aes.ccm.tag_set)
2108 return -1;
2109
2110 /* Update(): Process message. */
2111
2112 if (!cctx->aes.ccm.len_set) {
2113 /*-
2114 * In case message length was not previously set explicitly via
2115 * Update(), set it now.
2116 */
2117 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2118
2119 cctx->aes.ccm.len_set = 1;
2120 }
2121
2122 if (enc) {
2123 if (s390x_aes_ccm(cctx, in, out, len, enc))
2124 return -1;
2125
2126 cctx->aes.ccm.tag_set = 1;
2127 return len;
2128 } else {
2129 rv = -1;
2130
2131 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2132 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2133 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2134 cctx->aes.ccm.m))
2135 rv = len;
2136 }
2137
2138 if (rv == -1)
2139 OPENSSL_cleanse(out, len);
2140
2141 cctx->aes.ccm.iv_set = 0;
2142 cctx->aes.ccm.tag_set = 0;
2143 cctx->aes.ccm.len_set = 0;
2144 return rv;
2145 }
2146}
2147
2148/*-
2149 * Performs various operations on the context structure depending on control
2150 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2151 * Note that the s390x code is big-endian.
2152 */
2153static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2154{
2155 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2156 unsigned char *buf;
2157 int enc, len;
2158
2159 switch (type) {
2160 case EVP_CTRL_INIT:
2161 cctx->aes.ccm.key_set = 0;
2162 cctx->aes.ccm.iv_set = 0;
2163 cctx->aes.ccm.l = 8;
2164 cctx->aes.ccm.m = 12;
2165 cctx->aes.ccm.tag_set = 0;
2166 cctx->aes.ccm.len_set = 0;
2167 cctx->aes.ccm.tls_aad_len = -1;
2168 return 1;
2169
2170 case EVP_CTRL_GET_IVLEN:
2171 *(int *)ptr = 15 - cctx->aes.ccm.l;
2172 return 1;
2173
2174 case EVP_CTRL_AEAD_TLS1_AAD:
2175 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2176 return 0;
2177
2178 /* Save the aad for later use. */
2179 buf = EVP_CIPHER_CTX_buf_noconst(c);
2180 memcpy(buf, ptr, arg);
2181 cctx->aes.ccm.tls_aad_len = arg;
2182
2183 len = buf[arg - 2] << 8 | buf[arg - 1];
2184 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2185 return 0;
2186
2187 /* Correct length for explicit iv. */
2188 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2189
2190 enc = EVP_CIPHER_CTX_is_encrypting(c);
2191 if (!enc) {
2192 if (len < cctx->aes.ccm.m)
2193 return 0;
2194
2195 /* Correct length for tag. */
2196 len -= cctx->aes.ccm.m;
2197 }
2198
2199 buf[arg - 2] = len >> 8;
2200 buf[arg - 1] = len & 0xff;
2201
2202 /* Extra padding: tag appended to record. */
2203 return cctx->aes.ccm.m;
2204
2205 case EVP_CTRL_CCM_SET_IV_FIXED:
2206 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2207 return 0;
2208
2209 /* Copy to first part of the iv. */
2210 memcpy(c->iv, ptr, arg);
2211 return 1;
2212
2213 case EVP_CTRL_AEAD_SET_IVLEN:
2214 arg = 15 - arg;
2215 /* fall-through */
2216
2217 case EVP_CTRL_CCM_SET_L:
2218 if (arg < 2 || arg > 8)
2219 return 0;
2220
2221 cctx->aes.ccm.l = arg;
2222 return 1;
2223
2224 case EVP_CTRL_AEAD_SET_TAG:
2225 if ((arg & 1) || arg < 4 || arg > 16)
2226 return 0;
2227
2228 enc = EVP_CIPHER_CTX_is_encrypting(c);
2229 if (enc && ptr)
2230 return 0;
2231
2232 if (ptr) {
2233 cctx->aes.ccm.tag_set = 1;
2234 buf = EVP_CIPHER_CTX_buf_noconst(c);
2235 memcpy(buf, ptr, arg);
2236 }
2237
2238 cctx->aes.ccm.m = arg;
2239 return 1;
2240
2241 case EVP_CTRL_AEAD_GET_TAG:
2242 enc = EVP_CIPHER_CTX_is_encrypting(c);
2243 if (!enc || !cctx->aes.ccm.tag_set)
2244 return 0;
2245
2246 if (arg < cctx->aes.ccm.m)
2247 return 0;
2248
2249 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2250 cctx->aes.ccm.tag_set = 0;
2251 cctx->aes.ccm.iv_set = 0;
2252 cctx->aes.ccm.len_set = 0;
2253 return 1;
2254
2255 case EVP_CTRL_COPY:
2256 return 1;
2257
2258 default:
2259 return -1;
2260 }
2261}
2262
2263# define s390x_aes_ccm_cleanup aes_ccm_cleanup
2264
2265# ifndef OPENSSL_NO_OCB
2266# define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2267
2268# define s390x_aes_ocb_init_key aes_ocb_init_key
2269static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2270 const unsigned char *iv, int enc);
2271# define s390x_aes_ocb_cipher aes_ocb_cipher
2272static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2273 const unsigned char *in, size_t len);
2274# define s390x_aes_ocb_cleanup aes_ocb_cleanup
2275static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2276# define s390x_aes_ocb_ctrl aes_ocb_ctrl
2277static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2278# endif
2279
2280# ifndef OPENSSL_NO_SIV
2281# define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2282
2283# define s390x_aes_siv_init_key aes_siv_init_key
2284# define s390x_aes_siv_cipher aes_siv_cipher
2285# define s390x_aes_siv_cleanup aes_siv_cleanup
2286# define s390x_aes_siv_ctrl aes_siv_ctrl
2287# endif
2288
2289# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2290 MODE,flags) \
2291static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2292 nid##_##keylen##_##nmode,blocksize, \
2293 keylen / 8, \
2294 ivlen, \
2295 flags | EVP_CIPH_##MODE##_MODE, \
2296 EVP_ORIG_GLOBAL, \
2297 s390x_aes_##mode##_init_key, \
2298 s390x_aes_##mode##_cipher, \
2299 NULL, \
2300 sizeof(S390X_AES_##MODE##_CTX), \
2301 NULL, \
2302 NULL, \
2303 NULL, \
2304 NULL \
2305}; \
2306static const EVP_CIPHER aes_##keylen##_##mode = { \
2307 nid##_##keylen##_##nmode, \
2308 blocksize, \
2309 keylen / 8, \
2310 ivlen, \
2311 flags | EVP_CIPH_##MODE##_MODE, \
2312 EVP_ORIG_GLOBAL, \
2313 aes_init_key, \
2314 aes_##mode##_cipher, \
2315 NULL, \
2316 sizeof(EVP_AES_KEY), \
2317 NULL, \
2318 NULL, \
2319 NULL, \
2320 NULL \
2321}; \
2322const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2323{ \
2324 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2325 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2326}
2327
2328# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2329static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2330 nid##_##keylen##_##mode, \
2331 blocksize, \
2332 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2333 ivlen, \
2334 flags | EVP_CIPH_##MODE##_MODE, \
2335 EVP_ORIG_GLOBAL, \
2336 s390x_aes_##mode##_init_key, \
2337 s390x_aes_##mode##_cipher, \
2338 s390x_aes_##mode##_cleanup, \
2339 sizeof(S390X_AES_##MODE##_CTX), \
2340 NULL, \
2341 NULL, \
2342 s390x_aes_##mode##_ctrl, \
2343 NULL \
2344}; \
2345static const EVP_CIPHER aes_##keylen##_##mode = { \
2346 nid##_##keylen##_##mode,blocksize, \
2347 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2348 ivlen, \
2349 flags | EVP_CIPH_##MODE##_MODE, \
2350 EVP_ORIG_GLOBAL, \
2351 aes_##mode##_init_key, \
2352 aes_##mode##_cipher, \
2353 aes_##mode##_cleanup, \
2354 sizeof(EVP_AES_##MODE##_CTX), \
2355 NULL, \
2356 NULL, \
2357 aes_##mode##_ctrl, \
2358 NULL \
2359}; \
2360const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2361{ \
2362 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2363 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2364}
2365
2366#else
2367
2368# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2369static const EVP_CIPHER aes_##keylen##_##mode = { \
2370 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2371 flags|EVP_CIPH_##MODE##_MODE, \
2372 EVP_ORIG_GLOBAL, \
2373 aes_init_key, \
2374 aes_##mode##_cipher, \
2375 NULL, \
2376 sizeof(EVP_AES_KEY), \
2377 NULL,NULL,NULL,NULL }; \
2378const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2379{ return &aes_##keylen##_##mode; }
2380
2381# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2382static const EVP_CIPHER aes_##keylen##_##mode = { \
2383 nid##_##keylen##_##mode,blocksize, \
2384 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2385 ivlen, \
2386 flags|EVP_CIPH_##MODE##_MODE, \
2387 EVP_ORIG_GLOBAL, \
2388 aes_##mode##_init_key, \
2389 aes_##mode##_cipher, \
2390 aes_##mode##_cleanup, \
2391 sizeof(EVP_AES_##MODE##_CTX), \
2392 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2393const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2394{ return &aes_##keylen##_##mode; }
2395
2396#endif
2397
2398#define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2399 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2400 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2401 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2402 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2403 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2404 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2405 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
2406
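/*-
 * aes_init_key() below selects an implementation in priority order:
 * hardware AES (HWAES) where available, then bit-sliced AES (BSAES, used
 * only for CBC decryption and CTR), then vector-permutation AES (VPAES),
 * and finally the portable C implementation.
 */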
2407static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2408 const unsigned char *iv, int enc)
2409{
2410 int ret, mode;
2411 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2412 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2413
2414 if (keylen <= 0) {
2415 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2416 return 0;
2417 }
2418
2419 mode = EVP_CIPHER_CTX_get_mode(ctx);
2420 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2421 && !enc) {
2422#ifdef HWAES_CAPABLE
2423 if (HWAES_CAPABLE) {
2424 ret = HWAES_set_decrypt_key(key, keylen, &dat->ks.ks);
2425 dat->block = (block128_f) HWAES_decrypt;
2426 dat->stream.cbc = NULL;
2427# ifdef HWAES_cbc_encrypt
2428 if (mode == EVP_CIPH_CBC_MODE)
2429 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2430# endif
2431 } else
2432#endif
2433#ifdef BSAES_CAPABLE
2434 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2435 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2436 dat->block = (block128_f) AES_decrypt;
2437 dat->stream.cbc = (cbc128_f) ossl_bsaes_cbc_encrypt;
2438 } else
2439#endif
2440#ifdef VPAES_CAPABLE
2441 if (VPAES_CAPABLE) {
2442 ret = vpaes_set_decrypt_key(key, keylen, &dat->ks.ks);
2443 dat->block = (block128_f) vpaes_decrypt;
2444 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2445 (cbc128_f) vpaes_cbc_encrypt : NULL;
2446 } else
2447#endif
2448 {
2449 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2450 dat->block = (block128_f) AES_decrypt;
2451 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2452 (cbc128_f) AES_cbc_encrypt : NULL;
2453 }
2454 } else
2455#ifdef HWAES_CAPABLE
2456 if (HWAES_CAPABLE) {
2457 ret = HWAES_set_encrypt_key(key, keylen, &dat->ks.ks);
2458 dat->block = (block128_f) HWAES_encrypt;
2459 dat->stream.cbc = NULL;
2460# ifdef HWAES_cbc_encrypt
2461 if (mode == EVP_CIPH_CBC_MODE)
2462 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2463 else
2464# endif
2465# ifdef HWAES_ctr32_encrypt_blocks
2466 if (mode == EVP_CIPH_CTR_MODE)
2467 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2468 else
2469# endif
2470 (void)0; /* terminate potentially open 'else' */
2471 } else
2472#endif
2473#ifdef BSAES_CAPABLE
2474 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2475 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2476 dat->block = (block128_f) AES_encrypt;
2477 dat->stream.ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2478 } else
2479#endif
2480#ifdef VPAES_CAPABLE
2481 if (VPAES_CAPABLE) {
2482 ret = vpaes_set_encrypt_key(key, keylen, &dat->ks.ks);
2483 dat->block = (block128_f) vpaes_encrypt;
2484 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2485 (cbc128_f) vpaes_cbc_encrypt : NULL;
2486 } else
2487#endif
2488 {
2489 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2490 dat->block = (block128_f) AES_encrypt;
2491 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2492 (cbc128_f) AES_cbc_encrypt : NULL;
2493#ifdef AES_CTR_ASM
2494 if (mode == EVP_CIPH_CTR_MODE)
2495 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2496#endif
2497 }
2498
2499 if (ret < 0) {
2500 ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
2501 return 0;
2502 }
2503
2504 return 1;
2505}
2506
2507static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2508 const unsigned char *in, size_t len)
2509{
2510 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2511
2512 if (dat->stream.cbc)
2513 (*dat->stream.cbc) (in, out, len, &dat->ks, ctx->iv,
2514 EVP_CIPHER_CTX_is_encrypting(ctx));
2515 else if (EVP_CIPHER_CTX_is_encrypting(ctx))
2516 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv,
2517 dat->block);
2518 else
2519 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2520 ctx->iv, dat->block);
2521
2522 return 1;
2523}
2524
2525static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2526 const unsigned char *in, size_t len)
2527{
2528 size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);
2529 size_t i;
2530 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2531
2532 if (len < bl)
2533 return 1;
2534
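    /*
     * Process every complete block: len was reduced by one block above,
     * so the loop condition i <= len stops after the last full block.
     */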
2535 for (i = 0, len -= bl; i <= len; i += bl)
2536 (*dat->block) (in + i, out + i, &dat->ks);
2537
2538 return 1;
2539}
2540
2541static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2542 const unsigned char *in, size_t len)
2543{
2544 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2545
2546 int num = EVP_CIPHER_CTX_get_num(ctx);
2547 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2548 ctx->iv, &num, dat->block);
2549 EVP_CIPHER_CTX_set_num(ctx, num);
2550 return 1;
2551}
2552
2553static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2554 const unsigned char *in, size_t len)
2555{
2556 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2557
2558 int num = EVP_CIPHER_CTX_get_num(ctx);
2559 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2560 ctx->iv, &num,
2561 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2562 EVP_CIPHER_CTX_set_num(ctx, num);
2563 return 1;
2564}
2565
2566static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2567 const unsigned char *in, size_t len)
2568{
2569 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2570
2571 int num = EVP_CIPHER_CTX_get_num(ctx);
2572 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2573 ctx->iv, &num,
2574 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2575 EVP_CIPHER_CTX_set_num(ctx, num);
2576 return 1;
2577}
2578
2579static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2580 const unsigned char *in, size_t len)
2581{
2582 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2583
2584 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2585 int num = EVP_CIPHER_CTX_get_num(ctx);
2586 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2587 ctx->iv, &num,
2588 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2589 EVP_CIPHER_CTX_set_num(ctx, num);
2590 return 1;
2591 }
2592
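    /*
     * CRYPTO_cfb128_1_encrypt() takes its length argument in bits, so
     * process at most MAXBITCHUNK bytes per call to keep the bit count
     * (len * 8) from overflowing a size_t.
     */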
2593 while (len >= MAXBITCHUNK) {
2594 int num = EVP_CIPHER_CTX_get_num(ctx);
2595 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2596 ctx->iv, &num,
2597 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2598 EVP_CIPHER_CTX_set_num(ctx, num);
2599 len -= MAXBITCHUNK;
2600 out += MAXBITCHUNK;
2601 in += MAXBITCHUNK;
2602 }
2603 if (len) {
2604 int num = EVP_CIPHER_CTX_get_num(ctx);
2605 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2606 ctx->iv, &num,
2607 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2608 EVP_CIPHER_CTX_set_num(ctx, num);
2609 }
2610
2611 return 1;
2612}
2613
2614static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2615 const unsigned char *in, size_t len)
2616{
2617 int n = EVP_CIPHER_CTX_get_num(ctx);
2618 unsigned int num;
2619 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2620
2621 if (n < 0)
2622 return 0;
2623 num = (unsigned int)n;
2624
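    /*
     * The ctr32 path uses an accelerated routine that increments only the
     * low 32 bits of the counter; CRYPTO_ctr128_encrypt_ctr32() handles
     * the carry into the upper 96 bits.
     */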
2625 if (dat->stream.ctr)
2626 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2627 ctx->iv,
2628 EVP_CIPHER_CTX_buf_noconst(ctx),
2629 &num, dat->stream.ctr);
2630 else
2631 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2632 ctx->iv,
2633 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2634 dat->block);
2635 EVP_CIPHER_CTX_set_num(ctx, num);
2636 return 1;
2637}
2638
2639BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2640 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2641 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2642
2643static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2644{
2645 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2646 if (gctx == NULL)
2647 return 0;
2648 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2649 if (gctx->iv != c->iv)
2650 OPENSSL_free(gctx->iv);
2651 return 1;
2652}
2653
2654static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2655{
2656 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2657 switch (type) {
2658 case EVP_CTRL_INIT:
2659 gctx->key_set = 0;
2660 gctx->iv_set = 0;
2661 gctx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
2662 gctx->iv = c->iv;
2663 gctx->taglen = -1;
2664 gctx->iv_gen = 0;
2665 gctx->tls_aad_len = -1;
2666 return 1;
2667
2668 case EVP_CTRL_GET_IVLEN:
2669 *(int *)ptr = gctx->ivlen;
2670 return 1;
2671
2672 case EVP_CTRL_AEAD_SET_IVLEN:
2673 if (arg <= 0)
2674 return 0;
2675 /* Allocate memory for IV if needed */
2676 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2677 if (gctx->iv != c->iv)
2678 OPENSSL_free(gctx->iv);
2679 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2680 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2681 return 0;
2682 }
2683 }
2684 gctx->ivlen = arg;
2685 return 1;
2686
2687 case EVP_CTRL_AEAD_SET_TAG:
2688 if (arg <= 0 || arg > 16 || c->encrypt)
2689 return 0;
2690 memcpy(c->buf, ptr, arg);
2691 gctx->taglen = arg;
2692 return 1;
2693
2694 case EVP_CTRL_AEAD_GET_TAG:
2695 if (arg <= 0 || arg > 16 || !c->encrypt
2696 || gctx->taglen < 0)
2697 return 0;
2698 memcpy(ptr, c->buf, arg);
2699 return 1;
2700
2701 case EVP_CTRL_GCM_SET_IV_FIXED:
2702 /* Special case: -1 length restores whole IV */
2703 if (arg == -1) {
2704 memcpy(gctx->iv, ptr, gctx->ivlen);
2705 gctx->iv_gen = 1;
2706 return 1;
2707 }
2708 /*
2709 * Fixed field must be at least 4 bytes and invocation field at least
2710 * 8.
2711 */
2712 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2713 return 0;
2714 if (arg)
2715 memcpy(gctx->iv, ptr, arg);
2716 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2717 return 0;
2718 gctx->iv_gen = 1;
2719 return 1;
2720
2721 case EVP_CTRL_GCM_IV_GEN:
2722 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2723 return 0;
2724 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2725 if (arg <= 0 || arg > gctx->ivlen)
2726 arg = gctx->ivlen;
2727 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2728 /*
2729 * The invocation field will be at least 8 bytes in size, so there is no
2730 * need to check for wraparound or to increment more than the last 8 bytes.
2731 */
2732 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2733 gctx->iv_set = 1;
2734 return 1;
2735
2736 case EVP_CTRL_GCM_SET_IV_INV:
2737 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2738 return 0;
2739 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2740 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2741 gctx->iv_set = 1;
2742 return 1;
2743
2744 case EVP_CTRL_AEAD_TLS1_AAD:
2745 /* Save the AAD for later use */
2746 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2747 return 0;
2748 memcpy(c->buf, ptr, arg);
2749 gctx->tls_aad_len = arg;
2750 gctx->tls_enc_records = 0;
2751 {
2752 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2753 /* Correct length for explicit IV */
2754 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2755 return 0;
2756 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2757 /* If decrypting correct for tag too */
2758 if (!c->encrypt) {
2759 if (len < EVP_GCM_TLS_TAG_LEN)
2760 return 0;
2761 len -= EVP_GCM_TLS_TAG_LEN;
2762 }
2763 c->buf[arg - 2] = len >> 8;
2764 c->buf[arg - 1] = len & 0xff;
2765 }
2766 /* Extra padding: tag appended to record */
2767 return EVP_GCM_TLS_TAG_LEN;
2768
2769 case EVP_CTRL_COPY:
2770 {
2771 EVP_CIPHER_CTX *out = ptr;
2772 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2773 if (gctx->gcm.key) {
2774 if (gctx->gcm.key != &gctx->ks)
2775 return 0;
2776 gctx_out->gcm.key = &gctx_out->ks;
2777 }
2778 if (gctx->iv == c->iv)
2779 gctx_out->iv = out->iv;
2780 else {
2781 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2782 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2783 return 0;
2784 }
2785 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2786 }
2787 return 1;
2788 }
2789
2790 default:
2791 return -1;
2792
2793 }
2794}
2795
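/*-
 * Illustrative EVP-level control sequence for a non-default IV length
 * (a sketch only; ctx, key, iv and tag are assumed to be supplied by the
 * caller):
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_256_gcm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv);
 *     ...
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 */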
2796static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2797 const unsigned char *iv, int enc)
2798{
2799 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2800
2801 if (iv == NULL && key == NULL)
2802 return 1;
2803
2804 if (key != NULL) {
2805 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2806
2807 if (keylen <= 0) {
2808 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2809 return 0;
2810 }
2811 do {
2812#ifdef HWAES_CAPABLE
2813 if (HWAES_CAPABLE) {
2814 HWAES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2815 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2816 (block128_f) HWAES_encrypt);
2817# ifdef HWAES_ctr32_encrypt_blocks
2818 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2819# else
2820 gctx->ctr = NULL;
2821# endif
2822 break;
2823 } else
2824#endif
2825#ifdef BSAES_CAPABLE
2826 if (BSAES_CAPABLE) {
2827 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2828 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2829 (block128_f) AES_encrypt);
2830 gctx->ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2831 break;
2832 } else
2833#endif
2834#ifdef VPAES_CAPABLE
2835 if (VPAES_CAPABLE) {
2836 vpaes_set_encrypt_key(key, keylen, &gctx->ks.ks);
2837 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2838 (block128_f) vpaes_encrypt);
2839 gctx->ctr = NULL;
2840 break;
2841 } else
2842#endif
2843 (void)0; /* terminate potentially open 'else' */
2844
2845 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2846 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2847 (block128_f) AES_encrypt);
2848#ifdef AES_CTR_ASM
2849 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2850#else
2851 gctx->ctr = NULL;
2852#endif
2853 } while (0);
2854
2855 /*
2856 * If we have an IV we can set it directly, otherwise use the saved IV.
2857 */
2858 if (iv == NULL && gctx->iv_set)
2859 iv = gctx->iv;
2860 if (iv) {
2861 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2862 gctx->iv_set = 1;
2863 }
2864 gctx->key_set = 1;
2865 } else {
2866 /* If key set use IV, otherwise copy */
2867 if (gctx->key_set)
2868 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2869 else
2870 memcpy(gctx->iv, iv, gctx->ivlen);
2871 gctx->iv_set = 1;
2872 gctx->iv_gen = 0;
2873 }
2874 return 1;
2875}
2876
2877/*
2878 * Handle TLS GCM packet format. This consists of the last portion of the IV
2879 * followed by the payload and finally the tag. On encrypt generate IV,
2880 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2881 * and verify tag.
2882 */
2883
2884static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2885 const unsigned char *in, size_t len)
2886{
2887 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2888 int rv = -1;
2889 /* Encrypt/decrypt must be performed in place */
2890 if (out != in
2891 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2892 return -1;
2893
2894 /*
2895 * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2896 * Requirements from SP 800-38D". The requirement is for one party to the
2897 * communication to fail after 2^64 - 1 records. We do this on the encrypting
2898 * side only.
2899 */
2900 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
2901 ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
2902 goto err;
2903 }
2904
2905 /*
2906 * Set IV from start of buffer or generate IV and write to start of
2907 * buffer.
2908 */
2909 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
2910 : EVP_CTRL_GCM_SET_IV_INV,
2911 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2912 goto err;
2913 /* Use saved AAD */
2914 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
2915 goto err;
2916 /* Fix buffer and length to point to payload */
2917 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2918 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2919 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2920 if (ctx->encrypt) {
2921 /* Encrypt payload */
2922 if (gctx->ctr) {
2923 size_t bulk = 0;
2924#if defined(AES_GCM_ASM)
2925 if (len >= 32 && AES_GCM_ASM(gctx)) {
2926 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2927 return -1;
2928
2929 bulk = AES_gcm_encrypt(in, out, len,
2930 gctx->gcm.key,
2931 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2932 gctx->gcm.len.u[1] += bulk;
2933 }
2934#endif
2935 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2936 in + bulk,
2937 out + bulk,
2938 len - bulk, gctx->ctr))
2939 goto err;
2940 } else {
2941 size_t bulk = 0;
2942#if defined(AES_GCM_ASM2)
2943 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2944 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2945 return -1;
2946
2947 bulk = AES_gcm_encrypt(in, out, len,
2948 gctx->gcm.key,
2949 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2950 gctx->gcm.len.u[1] += bulk;
2951 }
2952#endif
2953 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2954 in + bulk, out + bulk, len - bulk))
2955 goto err;
2956 }
2957 out += len;
2958 /* Finally write tag */
2959 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2960 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2961 } else {
2962 /* Decrypt */
2963 if (gctx->ctr) {
2964 size_t bulk = 0;
2965#if defined(AES_GCM_ASM)
2966 if (len >= 16 && AES_GCM_ASM(gctx)) {
2967 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2968 return -1;
2969
2970 bulk = AES_gcm_decrypt(in, out, len,
2971 gctx->gcm.key,
2972 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2973 gctx->gcm.len.u[1] += bulk;
2974 }
2975#endif
2976 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2977 in + bulk,
2978 out + bulk,
2979 len - bulk, gctx->ctr))
2980 goto err;
2981 } else {
2982 size_t bulk = 0;
2983#if defined(AES_GCM_ASM2)
2984 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2985 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2986 return -1;
2987
2988 bulk = AES_gcm_decrypt(in, out, len,
2989 gctx->gcm.key,
2990 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2991 gctx->gcm.len.u[1] += bulk;
2992 }
2993#endif
2994 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
2995 in + bulk, out + bulk, len - bulk))
2996 goto err;
2997 }
2998 /* Retrieve tag */
2999 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
3000 /* If tag mismatch wipe buffer */
3001 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
3002 OPENSSL_cleanse(out, len);
3003 goto err;
3004 }
3005 rv = len;
3006 }
3007
3008 err:
3009 gctx->iv_set = 0;
3010 gctx->tls_aad_len = -1;
3011 return rv;
3012}
3013
3014#ifdef FIPS_MODULE
3015/*
3016 * See SP800-38D (GCM) Section 8 "Uniqueness requirement on IVS and keys"
3017 *
3018 * See also 8.2.2 RBG-based construction.
3019 * The random construction consists of a free field (which can be NULL) and
3020 * a random field generated by a DRBG that can supply at least 96 bits of
3021 * security strength. (The DRBG must be seeded by the FIPS module.)
3022 */
3023static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
3024{
3025 int sz = gctx->ivlen - offset;
3026
3027 /* Must be at least 96 bits */
3028 if (sz <= 0 || gctx->ivlen < 12)
3029 return 0;
3030
3031 /* Use DRBG to generate random iv */
3032 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
3033 return 0;
3034 return 1;
3035}
3036#endif /* FIPS_MODULE */
3037
3038static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3039 const unsigned char *in, size_t len)
3040{
3041 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3042
3043 /* If not set up, return error */
3044 if (!gctx->key_set)
3045 return -1;
3046
3047 if (gctx->tls_aad_len >= 0)
3048 return aes_gcm_tls_cipher(ctx, out, in, len);
3049
3050#ifdef FIPS_MODULE
3051 /*
3052 * FIPS requires generation of AES-GCM IV's inside the FIPS module.
3053 * The IV can still be set externally (the security policy will state that
3054 * this is not FIPS compliant). There are some applications
3055 * where setting the IV externally is the only option available.
3056 */
3057 if (!gctx->iv_set) {
3058 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
3059 return -1;
3060 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
3061 gctx->iv_set = 1;
3062 gctx->iv_gen_rand = 1;
3063 }
3064#else
3065 if (!gctx->iv_set)
3066 return -1;
3067#endif /* FIPS_MODULE */
3068
3069 if (in) {
3070 if (out == NULL) {
3071 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
3072 return -1;
3073 } else if (ctx->encrypt) {
3074 if (gctx->ctr) {
3075 size_t bulk = 0;
3076#if defined(AES_GCM_ASM)
3077 if (len >= 32 && AES_GCM_ASM(gctx)) {
3078 size_t res = (16 - gctx->gcm.mres) % 16;
3079
3080 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3081 return -1;
3082
3083 bulk = AES_gcm_encrypt(in + res,
3084 out + res, len - res,
3085 gctx->gcm.key, gctx->gcm.Yi.c,
3086 gctx->gcm.Xi.u);
3087 gctx->gcm.len.u[1] += bulk;
3088 bulk += res;
3089 }
3090#endif
3091 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3092 in + bulk,
3093 out + bulk,
3094 len - bulk, gctx->ctr))
3095 return -1;
3096 } else {
3097 size_t bulk = 0;
3098#if defined(AES_GCM_ASM2)
3099 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3100 size_t res = (16 - gctx->gcm.mres) % 16;
3101
3102 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3103 return -1;
3104
3105 bulk = AES_gcm_encrypt(in + res,
3106 out + res, len - res,
3107 gctx->gcm.key, gctx->gcm.Yi.c,
3108 gctx->gcm.Xi.u);
3109 gctx->gcm.len.u[1] += bulk;
3110 bulk += res;
3111 }
3112#endif
3113 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3114 in + bulk, out + bulk, len - bulk))
3115 return -1;
3116 }
3117 } else {
3118 if (gctx->ctr) {
3119 size_t bulk = 0;
3120#if defined(AES_GCM_ASM)
3121 if (len >= 16 && AES_GCM_ASM(gctx)) {
3122 size_t res = (16 - gctx->gcm.mres) % 16;
3123
3124 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3125 return -1;
3126
3127 bulk = AES_gcm_decrypt(in + res,
3128 out + res, len - res,
3129 gctx->gcm.key,
3130 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3131 gctx->gcm.len.u[1] += bulk;
3132 bulk += res;
3133 }
3134#endif
3135 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3136 in + bulk,
3137 out + bulk,
3138 len - bulk, gctx->ctr))
3139 return -1;
3140 } else {
3141 size_t bulk = 0;
3142#if defined(AES_GCM_ASM2)
3143 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3144 size_t res = (16 - gctx->gcm.mres) % 16;
3145
3146 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3147 return -1;
3148
3149 bulk = AES_gcm_decrypt(in + res,
3150 out + res, len - res,
3151 gctx->gcm.key,
3152 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3153 gctx->gcm.len.u[1] += bulk;
3154 bulk += res;
3155 }
3156#endif
3157 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3158 in + bulk, out + bulk, len - bulk))
3159 return -1;
3160 }
3161 }
3162 return len;
3163 } else {
3164 if (!ctx->encrypt) {
3165 if (gctx->taglen < 0)
3166 return -1;
3167 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3168 return -1;
3169 gctx->iv_set = 0;
3170 return 0;
3171 }
3172 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3173 gctx->taglen = 16;
3174 /* Don't reuse the IV */
3175 gctx->iv_set = 0;
3176 return 0;
3177 }
3178
3179}
3180
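/*-
 * In aes_gcm_cipher() above, a call with out == NULL feeds AAD and a call
 * with in == NULL finalises. A minimal encryption sketch (buffers assumed
 * to be caller-defined):
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, key, iv);
 *     EVP_EncryptUpdate(ctx, NULL, &outl, aad, aadlen);   <- AAD only
 *     EVP_EncryptUpdate(ctx, ct, &outl, pt, ptlen);       <- payload
 *     EVP_EncryptFinal_ex(ctx, ct + outl, &tmplen);       <- writes no data
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag);
 */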
3181#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3182 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3183 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3184 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3185
3186BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3187 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3188BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3189 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3190BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3191 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3192
3193static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3194{
3195 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3196
3197 if (type == EVP_CTRL_COPY) {
3198 EVP_CIPHER_CTX *out = ptr;
3199 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3200
3201 if (xctx->xts.key1) {
3202 if (xctx->xts.key1 != &xctx->ks1)
3203 return 0;
3204 xctx_out->xts.key1 = &xctx_out->ks1;
3205 }
3206 if (xctx->xts.key2) {
3207 if (xctx->xts.key2 != &xctx->ks2)
3208 return 0;
3209 xctx_out->xts.key2 = &xctx_out->ks2;
3210 }
3211 return 1;
3212 } else if (type != EVP_CTRL_INIT)
3213 return -1;
3214 /* key1 and key2 are used as an indicator both key and IV are set */
3215 xctx->xts.key1 = NULL;
3216 xctx->xts.key2 = NULL;
3217 return 1;
3218}
3219
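/*-
 * Note that the EVP-level XTS key is double length: EVP_aes_256_xts()
 * expects a 64-byte key (two concatenated AES-256 keys) and a 16-byte
 * tweak passed as the IV, e.g. (sketch, caller-defined buffers):
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_256_xts(), NULL, key64, tweak16);
 *     EVP_EncryptUpdate(ctx, out, &outl, in, inlen);
 */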
3220static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3221 const unsigned char *iv, int enc)
3222{
3223 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3224
3225 if (iv == NULL && key == NULL)
3226 return 1;
3227
3228 if (key != NULL) {
3229 do {
3230 /* The key is in fact two half-length keys concatenated */
3231 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
3232 const int bytes = keylen / 2;
3233 const int bits = bytes * 8;
3234
3235 if (keylen <= 0) {
3236 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3237 return 0;
3238 }
3239 /*
3240 * Verify that the two keys are different.
3241 *
3242 * This addresses the vulnerability described in Rogaway's
3243 * September 2004 paper:
3244 *
3245 * "Efficient Instantiations of Tweakable Blockciphers and
3246 * Refinements to Modes OCB and PMAC".
3247 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3248 *
3249 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3250 * that:
3251 * "The check for Key_1 != Key_2 shall be done at any place
3252 * BEFORE using the keys in the XTS-AES algorithm to process
3253 * data with them."
3254 */
3255 if ((!allow_insecure_decrypt || enc)
3256 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3257 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
3258 return 0;
3259 }
3260
3261#ifdef AES_XTS_ASM
3262 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3263#else
3264 xctx->stream = NULL;
3265#endif
3266 /* key_len is two AES keys */
3267#ifdef HWAES_CAPABLE
3268 if (HWAES_CAPABLE) {
3269 if (enc) {
3270 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3271 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3272# ifdef HWAES_xts_encrypt
3273 xctx->stream = HWAES_xts_encrypt;
3274# endif
3275 } else {
3276 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3277 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3278# ifdef HWAES_xts_decrypt
3279 xctx->stream = HWAES_xts_decrypt;
3280# endif
3281 }
3282
3283 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3284 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3285
3286 xctx->xts.key1 = &xctx->ks1;
3287 break;
3288 } else
3289#endif
3290#ifdef BSAES_CAPABLE
3291 if (BSAES_CAPABLE)
3292 xctx->stream = enc ? ossl_bsaes_xts_encrypt : ossl_bsaes_xts_decrypt;
3293 else
3294#endif
3295#ifdef VPAES_CAPABLE
3296 if (VPAES_CAPABLE) {
3297 if (enc) {
3298 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3299 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3300 } else {
3301 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3302 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3303 }
3304
3305 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3306 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3307
3308 xctx->xts.key1 = &xctx->ks1;
3309 break;
3310 } else
3311#endif
3312 (void)0; /* terminate potentially open 'else' */
3313
3314 if (enc) {
3315 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3316 xctx->xts.block1 = (block128_f) AES_encrypt;
3317 } else {
3318 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3319 xctx->xts.block1 = (block128_f) AES_decrypt;
3320 }
3321
3322 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3323 xctx->xts.block2 = (block128_f) AES_encrypt;
3324
3325 xctx->xts.key1 = &xctx->ks1;
3326 } while (0);
3327 }
3328
3329 if (iv) {
3330 xctx->xts.key2 = &xctx->ks2;
3331 memcpy(ctx->iv, iv, 16);
3332 }
3333
3334 return 1;
3335}
3336
3337static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3338 const unsigned char *in, size_t len)
3339{
3340 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3341
3342 if (xctx->xts.key1 == NULL
3343 || xctx->xts.key2 == NULL
3344 || out == NULL
3345 || in == NULL
3346 || len < AES_BLOCK_SIZE)
3347 return 0;
3348
3349 /*
3350 * Impose a limit of 2^20 blocks per data unit as specified by
3351 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3352 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3353 * NIST SP 800-38E mandates the same limit.
3354 */
3355 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3356 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3357 return 0;
3358 }
3359
3360 if (xctx->stream)
3361 (*xctx->stream) (in, out, len,
3362 xctx->xts.key1, xctx->xts.key2,
3363 ctx->iv);
3364 else if (CRYPTO_xts128_encrypt(&xctx->xts, ctx->iv, in, out, len,
3365 EVP_CIPHER_CTX_is_encrypting(ctx)))
3366 return 0;
3367 return 1;
3368}
3369
3370#define aes_xts_cleanup NULL
3371
3372#define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3373 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3374 | EVP_CIPH_CUSTOM_COPY)
3375
3376BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3377BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
3378
3379static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3380{
3381 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3382 switch (type) {
3383 case EVP_CTRL_INIT:
3384 cctx->key_set = 0;
3385 cctx->iv_set = 0;
3386 cctx->L = 8;
3387 cctx->M = 12;
3388 cctx->tag_set = 0;
3389 cctx->len_set = 0;
3390 cctx->tls_aad_len = -1;
3391 return 1;
3392
3393 case EVP_CTRL_GET_IVLEN:
3394 *(int *)ptr = 15 - cctx->L;
3395 return 1;
3396
3397 case EVP_CTRL_AEAD_TLS1_AAD:
3398 /* Save the AAD for later use */
3399 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3400 return 0;
3401 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3402 cctx->tls_aad_len = arg;
3403 {
3404 uint16_t len =
3405 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3406 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3407 /* Correct length for explicit IV */
3408 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3409 return 0;
3410 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3411 /* If decrypting correct for tag too */
3412 if (!EVP_CIPHER_CTX_is_encrypting(c)) {
3413 if (len < cctx->M)
3414 return 0;
3415 len -= cctx->M;
3416 }
3417 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3418 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3419 }
3420 /* Extra padding: tag appended to record */
3421 return cctx->M;
3422
3423 case EVP_CTRL_CCM_SET_IV_FIXED:
3424 /* Sanity check length */
3425 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3426 return 0;
3427 /* Just copy to first part of IV */
3428 memcpy(c->iv, ptr, arg);
3429 return 1;
3430
3431 case EVP_CTRL_AEAD_SET_IVLEN:
3432 arg = 15 - arg;
3433 /* fall through */
3434 case EVP_CTRL_CCM_SET_L:
3435 if (arg < 2 || arg > 8)
3436 return 0;
3437 cctx->L = arg;
3438 return 1;
3439
3440 case EVP_CTRL_AEAD_SET_TAG:
3441 if ((arg & 1) || arg < 4 || arg > 16)
3442 return 0;
3443 if (EVP_CIPHER_CTX_is_encrypting(c) && ptr)
3444 return 0;
3445 if (ptr) {
3446 cctx->tag_set = 1;
3447 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3448 }
3449 cctx->M = arg;
3450 return 1;
3451
3452 case EVP_CTRL_AEAD_GET_TAG:
3453 if (!EVP_CIPHER_CTX_is_encrypting(c) || !cctx->tag_set)
3454 return 0;
3455 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3456 return 0;
3457 cctx->tag_set = 0;
3458 cctx->iv_set = 0;
3459 cctx->len_set = 0;
3460 return 1;
3461
3462 case EVP_CTRL_COPY:
3463 {
3464 EVP_CIPHER_CTX *out = ptr;
3465 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3466 if (cctx->ccm.key) {
3467 if (cctx->ccm.key != &cctx->ks)
3468 return 0;
3469 cctx_out->ccm.key = &cctx_out->ks;
3470 }
3471 return 1;
3472 }
3473
3474 default:
3475 return -1;
3476
3477 }
3478}
3479
3480static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3481 const unsigned char *iv, int enc)
3482{
3483 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3484
3485 if (iv == NULL && key == NULL)
3486 return 1;
3487
3488 if (key != NULL) {
3489 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3490
3491 if (keylen <= 0) {
3492 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3493 return 0;
3494 }
3495 do {
3496#ifdef HWAES_CAPABLE
3497 if (HWAES_CAPABLE) {
3498 HWAES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3499
3500 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3501 &cctx->ks, (block128_f) HWAES_encrypt);
3502 cctx->str = NULL;
3503 cctx->key_set = 1;
3504 break;
3505 } else
3506#endif
3507#ifdef VPAES_CAPABLE
3508 if (VPAES_CAPABLE) {
3509 vpaes_set_encrypt_key(key, keylen, &cctx->ks.ks);
3510 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3511 &cctx->ks, (block128_f) vpaes_encrypt);
3512 cctx->str = NULL;
3513 cctx->key_set = 1;
3514 break;
3515 }
3516#endif
3517 AES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3518 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3519 &cctx->ks, (block128_f) AES_encrypt);
3520 cctx->str = NULL;
3521 cctx->key_set = 1;
3522 } while (0);
3523 }
3524 if (iv != NULL) {
3525 memcpy(ctx->iv, iv, 15 - cctx->L);
3526 cctx->iv_set = 1;
3527 }
3528 return 1;
3529}
3530
3531static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3532 const unsigned char *in, size_t len)
3533{
3534 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3535 CCM128_CONTEXT *ccm = &cctx->ccm;
3536 /* Encrypt/decrypt must be performed in place */
3537 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3538 return -1;
3539 /* If encrypting set explicit IV from sequence number (start of AAD) */
3540 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3541 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3542 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3543 /* Get rest of IV from explicit IV */
3544 memcpy(ctx->iv + EVP_CCM_TLS_FIXED_IV_LEN, in,
3545 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3546 /* Correct length value */
3547 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3548 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L,
3549 len))
3550 return -1;
3551 /* Use saved AAD */
3552 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx),
3553 cctx->tls_aad_len);
3554 /* Fix buffer to point to payload */
3555 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3556 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3557 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3558 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3559 cctx->str) :
3560 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3561 return -1;
3562 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3563 return -1;
3564 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3565 } else {
3566 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3567 cctx->str) :
3568 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3569 unsigned char tag[16];
3570 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3571 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3572 return len;
3573 }
3574 }
3575 OPENSSL_cleanse(out, len);
3576 return -1;
3577 }
3578}
3579
3580static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3581 const unsigned char *in, size_t len)
3582{
3583 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3584 CCM128_CONTEXT *ccm = &cctx->ccm;
3585 /* If not set up, return error */
3586 if (!cctx->key_set)
3587 return -1;
3588
3589 if (cctx->tls_aad_len >= 0)
3590 return aes_ccm_tls_cipher(ctx, out, in, len);
3591
3592 /* EVP_*Final() doesn't return any data */
3593 if (in == NULL && out != NULL)
3594 return 0;
3595
3596 if (!cctx->iv_set)
3597 return -1;
3598
3599 if (!out) {
3600 if (!in) {
3601 if (CRYPTO_ccm128_setiv(ccm, ctx->iv,
3602 15 - cctx->L, len))
3603 return -1;
3604 cctx->len_set = 1;
3605 return len;
3606 }
3607 /* If we have AAD, the total message length must have been set first */
3608 if (!cctx->len_set && len)
3609 return -1;
3610 CRYPTO_ccm128_aad(ccm, in, len);
3611 return len;
3612 }
3613
3614 /* The tag must be set before actually decrypting data */
3615 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && !cctx->tag_set)
3616 return -1;
3617
3618 /* If the message length has not been set yet, do it now */
3619 if (!cctx->len_set) {
3620 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L, len))
3621 return -1;
3622 cctx->len_set = 1;
3623 }
3624 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3625 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3626 cctx->str) :
3627 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3628 return -1;
3629 cctx->tag_set = 1;
3630 return len;
3631 } else {
3632 int rv = -1;
3633 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3634 cctx->str) :
3635 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3636 unsigned char tag[16];
3637 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3638 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3639 cctx->M))
3640 rv = len;
3641 }
3642 }
3643 if (rv == -1)
3644 OPENSSL_cleanse(out, len);
3645 cctx->iv_set = 0;
3646 cctx->tag_set = 0;
3647 cctx->len_set = 0;
3648 return rv;
3649 }
3650}
3651
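/*-
 * CCM needs the total plaintext length before any AAD can be processed,
 * hence the calling convention sketched here (caller-defined buffers
 * assumed):
 *
 *     EVP_EncryptInit_ex(ctx, EVP_aes_128_ccm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
 *     EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv);
 *     EVP_EncryptUpdate(ctx, NULL, &outl, NULL, ptlen);   <- total length
 *     EVP_EncryptUpdate(ctx, NULL, &outl, aad, aadlen);   <- AAD
 *     EVP_EncryptUpdate(ctx, ct, &outl, pt, ptlen);       <- payload
 */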
3652#define aes_ccm_cleanup NULL
3653
3654BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3655 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3656BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3657 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3658BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3659 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3660
3661typedef struct {
3662 union {
3663 OSSL_UNION_ALIGN;
3664 AES_KEY ks;
3665 } ks;
3666 /* IV location; NULL indicates that no IV has been set */
3667 unsigned char *iv;
3668} EVP_AES_WRAP_CTX;
3669
3670static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3671 const unsigned char *iv, int enc)
3672{
3673 int len;
3674 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3675
3676 if (iv == NULL && key == NULL)
3677 return 1;
3678 if (key != NULL) {
3679 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3680
3681 if (keylen <= 0) {
3682 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3683 return 0;
3684 }
3685 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3686 AES_set_encrypt_key(key, keylen, &wctx->ks.ks);
3687 else
3688 AES_set_decrypt_key(key, keylen, &wctx->ks.ks);
3689 if (iv == NULL)
3690 wctx->iv = NULL;
3691 }
3692 if (iv != NULL) {
3693 if ((len = EVP_CIPHER_CTX_get_iv_length(ctx)) < 0)
3694 return 0;
3695 memcpy(ctx->iv, iv, len);
3696 wctx->iv = ctx->iv;
3697 }
3698 return 1;
3699}
3700
3701static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3702 const unsigned char *in, size_t inlen)
3703{
3704 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3705 size_t rv;
3706 /* AES wrap with padding has an IV length of 4; without padding, 8 */
3707 int pad = EVP_CIPHER_CTX_get_iv_length(ctx) == 4;
3708 /* No final operation so always return zero length */
3709 if (!in)
3710 return 0;
3711 /* Input length must always be non-zero */
3712 if (!inlen)
3713 return -1;
3714 /* If decrypting, we need at least 16 bytes and a multiple of 8 */
3715 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3716 return -1;
3717 /* If not padding, the input must be a multiple of 8 */
3718 if (!pad && inlen & 0x7)
3719 return -1;
3720 if (ossl_is_partially_overlapping(out, in, inlen)) {
3721 ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
3722 return 0;
3723 }
3724 if (!out) {
3725 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3726 /* If padding, round up to a multiple of 8 */
3727 if (pad)
3728 inlen = (inlen + 7) / 8 * 8;
3729 /* 8 byte prefix */
3730 return inlen + 8;
3731 } else {
3732 /*
3733 * If not padding, the output will be exactly 8 bytes smaller than the
3734 * input. If padding, it will be at least 8 bytes smaller, but we don't
3735 * know by how much.
3736 */
3737 return inlen - 8;
3738 }
3739 }
3740 if (pad) {
3741 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3742 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3743 out, in, inlen,
3744 (block128_f) AES_encrypt);
3745 else
3746 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3747 out, in, inlen,
3748 (block128_f) AES_decrypt);
3749 } else {
3750 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3751 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3752 out, in, inlen, (block128_f) AES_encrypt);
3753 else
3754 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3755 out, in, inlen, (block128_f) AES_decrypt);
3756 }
3757 return rv ? (int)rv : -1;
3758}
3759
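/*-
 * RFC 3394 key wrap prepends an 8-byte integrity check value, so the
 * ciphertext is inlen + 8 bytes; the padded variant (RFC 5649) first pads
 * the input to a multiple of 8. Wrap mode must be enabled explicitly at
 * the EVP level, e.g. (sketch):
 *
 *     EVP_CIPHER_CTX_set_flags(ctx, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
 *     EVP_EncryptInit_ex(ctx, EVP_aes_256_wrap(), NULL, kek, NULL);
 */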
3760#define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3761 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3762 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3763
3764static const EVP_CIPHER aes_128_wrap = {
3765 NID_id_aes128_wrap,
3766 8, 16, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3767 aes_wrap_init_key, aes_wrap_cipher,
3768 NULL,
3769 sizeof(EVP_AES_WRAP_CTX),
3770 NULL, NULL, NULL, NULL
3771};
3772
3773const EVP_CIPHER *EVP_aes_128_wrap(void)
3774{
3775 return &aes_128_wrap;
3776}
3777
3778static const EVP_CIPHER aes_192_wrap = {
3779 NID_id_aes192_wrap,
3780 8, 24, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3781 aes_wrap_init_key, aes_wrap_cipher,
3782 NULL,
3783 sizeof(EVP_AES_WRAP_CTX),
3784 NULL, NULL, NULL, NULL
3785};
3786
3787const EVP_CIPHER *EVP_aes_192_wrap(void)
3788{
3789 return &aes_192_wrap;
3790}
3791
3792static const EVP_CIPHER aes_256_wrap = {
3793 NID_id_aes256_wrap,
3794 8, 32, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3795 aes_wrap_init_key, aes_wrap_cipher,
3796 NULL,
3797 sizeof(EVP_AES_WRAP_CTX),
3798 NULL, NULL, NULL, NULL
3799};
3800
3801const EVP_CIPHER *EVP_aes_256_wrap(void)
3802{
3803 return &aes_256_wrap;
3804}
3805
3806static const EVP_CIPHER aes_128_wrap_pad = {
3807 NID_id_aes128_wrap_pad,
3808 8, 16, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3809 aes_wrap_init_key, aes_wrap_cipher,
3810 NULL,
3811 sizeof(EVP_AES_WRAP_CTX),
3812 NULL, NULL, NULL, NULL
3813};
3814
3815const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3816{
3817 return &aes_128_wrap_pad;
3818}
3819
3820static const EVP_CIPHER aes_192_wrap_pad = {
3821 NID_id_aes192_wrap_pad,
3822 8, 24, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3823 aes_wrap_init_key, aes_wrap_cipher,
3824 NULL,
3825 sizeof(EVP_AES_WRAP_CTX),
3826 NULL, NULL, NULL, NULL
3827};
3828
3829const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3830{
3831 return &aes_192_wrap_pad;
3832}
3833
3834static const EVP_CIPHER aes_256_wrap_pad = {
3835 NID_id_aes256_wrap_pad,
3836 8, 32, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3837 aes_wrap_init_key, aes_wrap_cipher,
3838 NULL,
3839 sizeof(EVP_AES_WRAP_CTX),
3840 NULL, NULL, NULL, NULL
3841};
3842
3843const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3844{
3845 return &aes_256_wrap_pad;
3846}
3847
3848#ifndef OPENSSL_NO_OCB
3849static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3850{
3851 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3852 EVP_CIPHER_CTX *newc;
3853 EVP_AES_OCB_CTX *new_octx;
3854
3855 switch (type) {
3856 case EVP_CTRL_INIT:
3857 octx->key_set = 0;
3858 octx->iv_set = 0;
3859 octx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
3860 octx->iv = c->iv;
3861 octx->taglen = 16;
3862 octx->data_buf_len = 0;
3863 octx->aad_buf_len = 0;
3864 return 1;
3865
3866 case EVP_CTRL_GET_IVLEN:
3867 *(int *)ptr = octx->ivlen;
3868 return 1;
3869
3870 case EVP_CTRL_AEAD_SET_IVLEN:
3871 /* IV len must be 1 to 15 */
3872 if (arg <= 0 || arg > 15)
3873 return 0;
3874
3875 octx->ivlen = arg;
3876 return 1;
3877
3878 case EVP_CTRL_AEAD_SET_TAG:
3879 if (ptr == NULL) {
3880 /* Tag len must be 0 to 16 */
3881 if (arg < 0 || arg > 16)
3882 return 0;
3883
3884 octx->taglen = arg;
3885 return 1;
3886 }
3887 if (arg != octx->taglen || EVP_CIPHER_CTX_is_encrypting(c))
3888 return 0;
3889 memcpy(octx->tag, ptr, arg);
3890 return 1;
3891
3892 case EVP_CTRL_AEAD_GET_TAG:
3893 if (arg != octx->taglen || !EVP_CIPHER_CTX_is_encrypting(c))
3894 return 0;
3895
3896 memcpy(ptr, octx->tag, arg);
3897 return 1;
3898
3899 case EVP_CTRL_COPY:
3900 newc = (EVP_CIPHER_CTX *)ptr;
3901 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3902 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3903 &new_octx->ksenc.ks,
3904 &new_octx->ksdec.ks);
3905
3906 default:
3907 return -1;
3908
3909 }
3910}
3911
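/*-
 * The OCB tag length defaults to 16 above and may be shortened before the
 * IV is set, since CRYPTO_ocb128_setiv() takes the tag length as a
 * parameter, e.g. (sketch):
 *
 *     EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 12, NULL);
 */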
3912static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3913 const unsigned char *iv, int enc)
3914{
3915 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3916
3917 if (iv == NULL && key == NULL)
3918 return 1;
3919
3920 if (key != NULL) {
3921 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3922
3923 if (keylen <= 0) {
3924 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3925 return 0;
3926 }
3927 do {
3928 /*
3929 * We set both the encrypt and decrypt key here because decrypt
3930 * needs both. We could possibly avoid setting the decrypt key when
3931 * the operation is encryption only.
3932 */
3933# ifdef HWAES_CAPABLE
3934 if (HWAES_CAPABLE) {
3935 HWAES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3936 HWAES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3937 if (!CRYPTO_ocb128_init(&octx->ocb,
3938 &octx->ksenc.ks, &octx->ksdec.ks,
3939 (block128_f) HWAES_encrypt,
3940 (block128_f) HWAES_decrypt,
3941 enc ? HWAES_ocb_encrypt
3942 : HWAES_ocb_decrypt))
3943 return 0;
3944 break;
3945 }
3946# endif
3947# ifdef VPAES_CAPABLE
3948 if (VPAES_CAPABLE) {
3949 vpaes_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3950 vpaes_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3951 if (!CRYPTO_ocb128_init(&octx->ocb,
3952 &octx->ksenc.ks, &octx->ksdec.ks,
3953 (block128_f) vpaes_encrypt,
3954 (block128_f) vpaes_decrypt,
3955 NULL))
3956 return 0;
3957 break;
3958 }
3959# endif
3960 AES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3961 AES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3962 if (!CRYPTO_ocb128_init(&octx->ocb,
3963 &octx->ksenc.ks, &octx->ksdec.ks,
3964 (block128_f) AES_encrypt,
3965 (block128_f) AES_decrypt,
3966 NULL))
3967 return 0;
3968 }
3969 while (0);
3970
3971 /*
3972 * If we have an IV we can set it directly, otherwise use the saved IV.
3973 */
3974 if (iv == NULL && octx->iv_set)
3975 iv = octx->iv;
3976 if (iv) {
3977 if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
3978 != 1)
3979 return 0;
3980 octx->iv_set = 1;
3981 }
3982 octx->key_set = 1;
3983 } else {
3984 /* If key set use IV, otherwise copy */
3985 if (octx->key_set)
3986 CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
3987 else
3988 memcpy(octx->iv, iv, octx->ivlen);
3989 octx->iv_set = 1;
3990 }
3991 return 1;
3992}
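
/*
 * Illustrative sketch (editor's addition, not referenced anywhere in this
 * file): aes_ocb_init_key() accepts the key and the IV in separate calls,
 * which is what makes the two-step EVP-level initialisation below work.
 * The function name is hypothetical; the 12-byte nonce matches the default
 * OCB IV length used by the ciphers defined at the end of this section.
 */
static int example_aes_ocb_two_step_init(EVP_CIPHER_CTX *ctx,
                                         const unsigned char key[16],
                                         const unsigned char nonce[12])
{
    /* First call: key only; the IV is deferred (the iv == NULL path above) */
    if (!EVP_EncryptInit_ex(ctx, EVP_aes_128_ocb(), NULL, key, NULL))
        return 0;
    /* Second call: IV only; key_set is now 1, so the IV is applied directly */
    return EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce);
}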

static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    unsigned char *buf;
    int *buf_len;
    int written_len = 0;
    size_t trailing_len;
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    /* If the IV or key is not set then return an error */
    if (!octx->iv_set)
        return -1;

    if (!octx->key_set)
        return -1;

    if (in != NULL) {
        /*
         * We need to ensure we only pass full blocks to the low-level OCB
         * routines. We do it here rather than in EVP_EncryptUpdate/
         * EVP_DecryptUpdate because we also need to pass full blocks of AAD,
         * and those routines do not support that.
         */

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (ossl_is_partially_overlapping(out + *buf_len, in, len)) {
                ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /*
             * If we get here we've filled the buffer, so process it
             */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /*
         * If we've got some full blocks to handle, then process these first
         */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First of all, empty the buffers of any partial blocks that we
         * might have been provided - both for data and AAD
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad
                (&octx->ocb, octx->aad_buf, octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify the tag */
        if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}
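
/*
 * Illustrative sketch (editor's addition, not referenced anywhere in this
 * file): one complete OCB encryption as seen from the EVP layer. It
 * exercises both paths of aes_ocb_cipher() above - the AAD path (out ==
 * NULL) and the partial-block buffering - by feeding the payload in two
 * odd-sized chunks. The function name and 16-byte tag are assumptions; the
 * context is assumed to be already initialised with a key and nonce.
 */
static int example_aes_ocb_encrypt(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *aad, int aadlen,
                                   const unsigned char *msg, int msglen,
                                   unsigned char *out, unsigned char tag[16])
{
    int len, outlen;

    /* AAD pass: out == NULL routes the input into octx->aad_buf above */
    if (!EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen))
        return 0;
    /*
     * Feed the payload in two pieces; any partial block is carried across
     * calls in octx->data_buf
     */
    if (!EVP_EncryptUpdate(ctx, out, &len, msg, msglen / 2))
        return 0;
    outlen = len;
    if (!EVP_EncryptUpdate(ctx, out + outlen, &len,
                           msg + msglen / 2, msglen - msglen / 2))
        return 0;
    outlen += len;
    /* The final call flushes any buffered remainder and computes the tag */
    if (!EVP_EncryptFinal_ex(ctx, out + outlen, &len))
        return 0;
    outlen += len;
    if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        return 0;
    return outlen;
}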

static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
#endif /* OPENSSL_NO_OCB */