VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.7/crypto/modes/gcm128.c@108344

Last change on this file since 108344 was 104078, checked in by vboxsync, 12 months ago

openssl-3.1.5: Applied and adjusted our OpenSSL changes to 3.1.4. bugref:10638

File size: 41.8 KB
 
/*
 * Copyright 2010-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include "internal/cryptlib.h"
#include "internal/endian.h"
#include "crypto/modes.h"

#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
typedef size_t size_t_aX __attribute((__aligned__(1)));
#else
typedef size_t size_t_aX;
#endif

#if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
/* redefine, because alignment is ensured */
# undef GETU32
# define GETU32(p)       BSWAP4(*(const u32 *)(p))
# undef PUTU32
# define PUTU32(p,v)     *(u32 *)(p) = BSWAP4(v)
#endif

/* RISC-V uses C implementation of gmult as a fallback. */
#if defined(__riscv)
# define INCLUDE_C_GMULT_4BIT
#endif

#define PACK(s)         ((size_t)(s)<<(sizeof(size_t)*8-16))
#define REDUCE1BIT(V)   do { \
        if (sizeof(size_t)==8) { \
                u64 T = U64(0xe100000000000000) & (0-(V.lo&1)); \
                V.lo  = (V.hi<<63)|(V.lo>>1); \
                V.hi  = (V.hi>>1 )^T; \
        } \
        else { \
                u32 T = 0xe1000000U & (0-(u32)(V.lo&1)); \
                V.lo  = (V.hi<<63)|(V.lo>>1); \
                V.hi  = (V.hi>>1 )^((u64)T<<32); \
        } \
} while(0)
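
/*-
 * Background sketch (standard GCM arithmetic per NIST SP 800-38D; a
 * reading aid, not upstream commentary): GHASH works in GF(2^128) modulo
 * x^128 + x^7 + x^2 + x + 1, with the bits of each field element stored
 * in reflected order, so multiplying by x is a right shift of the
 * (hi,lo) pair. When the bit shifted out of V.lo is set, the result is
 * reduced by XORing in the reflected low terms x^7 + x^2 + x + 1, i.e.
 * 0xE1 in the top byte. REDUCE1BIT does this without branching:
 * 0 - (V.lo & 1) is an all-ones mask when reduction is needed and zero
 * otherwise. A branchy but equivalent 64-bit formulation:
 *
 *     unsigned carry = V.lo & 1;
 *     V.lo = (V.hi << 63) | (V.lo >> 1);
 *     V.hi = V.hi >> 1;
 *     if (carry)
 *         V.hi ^= U64(0xe100000000000000);
 */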

/*-
 *
 * NOTE: TABLE_BITS and all non-4bit implementations have been removed in 3.1.
 *
 * Even though permitted values for TABLE_BITS are 8, 4 and 1, it should
 * never be set to 8. 8 is effectively reserved for testing purposes.
 * TABLE_BITS>1 are lookup-table-driven implementations referred to as
 * "Shoup's" in GCM specification. In other words OpenSSL does not cover
 * whole spectrum of possible table driven implementations. Why? In
 * non-"Shoup's" case memory access pattern is segmented in such manner,
 * that it's trivial to see that cache timing information can reveal
 * fair portion of intermediate hash value. Given that ciphertext is
 * always available to attacker, it's possible for him to attempt to
 * deduce secret parameter H and if successful, tamper with messages
 * [which is nothing but trivial in CTR mode]. In "Shoup's" case it's
 * not as trivial, but there is no reason to believe that it's resistant
 * to cache-timing attack. And the thing about "8-bit" implementation is
 * that it consumes 16 (sixteen) times more memory, 4KB per individual
 * key + 1KB shared. Well, on pros side it should be twice as fast as
 * "4-bit" version. And for gcc-generated x86[_64] code, "8-bit" version
 * was observed to run ~75% faster, closer to 100% for commercial
 * compilers... Yet "4-bit" procedure is preferred, because it's
 * believed to provide better security-performance balance and adequate
 * all-round performance. "All-round" refers to things like:
 *
 * - shorter setup time effectively improves overall timing for
 *   handling short messages;
 * - larger table allocation can become unbearable because of VM
 *   subsystem penalties (for example on Windows large enough free
 *   results in VM working set trimming, meaning that consequent
 *   malloc would immediately incur working set expansion);
 * - larger table has larger cache footprint, which can affect
 *   performance of other code paths (not necessarily even from same
 *   thread in Hyper-Threading world);
 *
 * Value of 1 is not appropriate for performance reasons.
 */

static void gcm_init_4bit(u128 Htable[16], const u64 H[2])
{
    u128 V;
# if defined(OPENSSL_SMALL_FOOTPRINT)
    int i;
# endif

    Htable[0].hi = 0;
    Htable[0].lo = 0;
    V.hi = H[0];
    V.lo = H[1];

# if defined(OPENSSL_SMALL_FOOTPRINT)
    for (Htable[8] = V, i = 4; i > 0; i >>= 1) {
        REDUCE1BIT(V);
        Htable[i] = V;
    }

    for (i = 2; i < 16; i <<= 1) {
        u128 *Hi = Htable + i;
        int j;
        for (V = *Hi, j = 1; j < i; ++j) {
            Hi[j].hi = V.hi ^ Htable[j].hi;
            Hi[j].lo = V.lo ^ Htable[j].lo;
        }
    }
# else
    Htable[8] = V;
    REDUCE1BIT(V);
    Htable[4] = V;
    REDUCE1BIT(V);
    Htable[2] = V;
    REDUCE1BIT(V);
    Htable[1] = V;
    Htable[3].hi = V.hi ^ Htable[2].hi, Htable[3].lo = V.lo ^ Htable[2].lo;
    V = Htable[4];
    Htable[5].hi = V.hi ^ Htable[1].hi, Htable[5].lo = V.lo ^ Htable[1].lo;
    Htable[6].hi = V.hi ^ Htable[2].hi, Htable[6].lo = V.lo ^ Htable[2].lo;
    Htable[7].hi = V.hi ^ Htable[3].hi, Htable[7].lo = V.lo ^ Htable[3].lo;
    V = Htable[8];
    Htable[9].hi = V.hi ^ Htable[1].hi, Htable[9].lo = V.lo ^ Htable[1].lo;
    Htable[10].hi = V.hi ^ Htable[2].hi, Htable[10].lo = V.lo ^ Htable[2].lo;
    Htable[11].hi = V.hi ^ Htable[3].hi, Htable[11].lo = V.lo ^ Htable[3].lo;
    Htable[12].hi = V.hi ^ Htable[4].hi, Htable[12].lo = V.lo ^ Htable[4].lo;
    Htable[13].hi = V.hi ^ Htable[5].hi, Htable[13].lo = V.lo ^ Htable[5].lo;
    Htable[14].hi = V.hi ^ Htable[6].hi, Htable[14].lo = V.lo ^ Htable[6].lo;
    Htable[15].hi = V.hi ^ Htable[7].hi, Htable[15].lo = V.lo ^ Htable[7].lo;
# endif
# if defined(GHASH_ASM) && (defined(__arm__) || defined(__arm))
    /*
     * ARM assembler expects specific dword order in Htable.
     */
    {
        int j;
        DECLARE_IS_ENDIAN;

        if (IS_LITTLE_ENDIAN)
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo;
                Htable[j].lo = V.hi;
        } else
            for (j = 0; j < 16; ++j) {
                V = Htable[j];
                Htable[j].hi = V.lo << 32 | V.lo >> 32;
                Htable[j].lo = V.hi << 32 | V.hi >> 32;
            }
    }
# endif
}
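
/*-
 * Reading of the table just built (background on the 4-bit Shoup method
 * referenced in the long comment above): Htable[n] holds n*H in
 * GF(2^128), with the nibble n read in GCM's reflected bit order.
 * Htable[8] is H itself and each REDUCE1BIT multiplies by x while
 * halving the index (Htable[4] = H*x, Htable[2] = H*x^2,
 * Htable[1] = H*x^3); every other entry follows by XOR, since
 * n*H = i*H ^ j*H whenever n == (i ^ j). The result is 16 entries of
 * 16 bytes, 256 bytes of per-key state.
 */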

# if !defined(GHASH_ASM) || defined(INCLUDE_C_GMULT_4BIT)
static const size_t rem_4bit[16] = {
    PACK(0x0000), PACK(0x1C20), PACK(0x3840), PACK(0x2460),
    PACK(0x7080), PACK(0x6CA0), PACK(0x48C0), PACK(0x54E0),
    PACK(0xE100), PACK(0xFD20), PACK(0xD940), PACK(0xC560),
    PACK(0x9180), PACK(0x8DA0), PACK(0xA9C0), PACK(0xB5E0)
};

static void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16])
{
    u128 Z;
    int cnt = 15;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

    nlo = ((const u8 *)Xi)[15];
    nhi = nlo >> 4;
    nlo &= 0xf;

    Z.hi = Htable[nlo].hi;
    Z.lo = Htable[nlo].lo;

    while (1) {
        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nhi].hi;
        Z.lo ^= Htable[nhi].lo;

        if (--cnt < 0)
            break;

        nlo = ((const u8 *)Xi)[cnt];
        nhi = nlo >> 4;
        nlo &= 0xf;

        rem = (size_t)Z.lo & 0xf;
        Z.lo = (Z.hi << 60) | (Z.lo >> 4);
        Z.hi = (Z.hi >> 4);
        if (sizeof(size_t) == 8)
            Z.hi ^= rem_4bit[rem];
        else
            Z.hi ^= (u64)rem_4bit[rem] << 32;

        Z.hi ^= Htable[nlo].hi;
        Z.lo ^= Htable[nlo].lo;
    }

    if (IS_LITTLE_ENDIAN) {
#  ifdef BSWAP8
        Xi[0] = BSWAP8(Z.hi);
        Xi[1] = BSWAP8(Z.lo);
#  else
        u8 *p = (u8 *)Xi;
        u32 v;
        v = (u32)(Z.hi >> 32);
        PUTU32(p, v);
        v = (u32)(Z.hi);
        PUTU32(p + 4, v);
        v = (u32)(Z.lo >> 32);
        PUTU32(p + 8, v);
        v = (u32)(Z.lo);
        PUTU32(p + 12, v);
#  endif
    } else {
        Xi[0] = Z.hi;
        Xi[1] = Z.lo;
    }
}

# endif
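
/*-
 * Sketch of the loop above (summary): gcm_gmult_4bit computes Xi = Xi * H
 * one nibble at a time, walking Xi from byte 15 down to byte 0 and
 * accumulating Z ^= Htable[nibble] with a 4-bit right shift of Z between
 * lookups. The four bits shifted out of Z.lo would fall off the 128-bit
 * element, so rem_4bit[rem] supplies their precomputed remainder modulo
 * the GCM polynomial, PACKed into the top 16 bits of a size_t; a single
 * XOR into Z.hi completes the reduction (the explicit << 32 covers
 * builds where size_t is 32 bits).
 */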

# if !defined(GHASH_ASM)
#  if !defined(OPENSSL_SMALL_FOOTPRINT)
/*
 * Streamed gcm_mult_4bit, see CRYPTO_gcm128_[en|de]crypt for
 * details... Compiler-generated code doesn't seem to give any
 * performance improvement, at least not on x86[_64]. It's here
 * mostly as reference and a placeholder for possible future
 * non-trivial optimization[s]...
 */
static void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
                           const u8 *inp, size_t len)
{
    u128 Z;
    int cnt;
    size_t rem, nlo, nhi;
    DECLARE_IS_ENDIAN;

    do {
        cnt = 15;
        nlo = ((const u8 *)Xi)[15];
        nlo ^= inp[15];
        nhi = nlo >> 4;
        nlo &= 0xf;

        Z.hi = Htable[nlo].hi;
        Z.lo = Htable[nlo].lo;

        while (1) {
            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nhi].hi;
            Z.lo ^= Htable[nhi].lo;

            if (--cnt < 0)
                break;

            nlo = ((const u8 *)Xi)[cnt];
            nlo ^= inp[cnt];
            nhi = nlo >> 4;
            nlo &= 0xf;

            rem = (size_t)Z.lo & 0xf;
            Z.lo = (Z.hi << 60) | (Z.lo >> 4);
            Z.hi = (Z.hi >> 4);
            if (sizeof(size_t) == 8)
                Z.hi ^= rem_4bit[rem];
            else
                Z.hi ^= (u64)rem_4bit[rem] << 32;

            Z.hi ^= Htable[nlo].hi;
            Z.lo ^= Htable[nlo].lo;
        }

        if (IS_LITTLE_ENDIAN) {
#   ifdef BSWAP8
            Xi[0] = BSWAP8(Z.hi);
            Xi[1] = BSWAP8(Z.lo);
#   else
            u8 *p = (u8 *)Xi;
            u32 v;
            v = (u32)(Z.hi >> 32);
            PUTU32(p, v);
            v = (u32)(Z.hi);
            PUTU32(p + 4, v);
            v = (u32)(Z.lo >> 32);
            PUTU32(p + 8, v);
            v = (u32)(Z.lo);
            PUTU32(p + 12, v);
#   endif
        } else {
            Xi[0] = Z.hi;
            Xi[1] = Z.lo;
        }
    } while (inp += 16, len -= 16);
}
#  endif
# else
void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# endif

# define GCM_MUL(ctx)      ctx->funcs.gmult(ctx->Xi.u,ctx->Htable)
# if defined(GHASH_ASM) || !defined(OPENSSL_SMALL_FOOTPRINT)
#  define GHASH(ctx,in,len) ctx->funcs.ghash((ctx)->Xi.u,(ctx)->Htable,in,len)
/*
 * GHASH_CHUNK is "stride parameter" missioned to mitigate cache trashing
 * effect. In other words idea is to hash data while it's still in L1 cache
 * after encryption pass...
 */
#  define GHASH_CHUNK       (3*1024)
# endif

#if (defined(GHASH_ASM) || defined(OPENSSL_CPUID_OBJ))
# if !defined(I386_ONLY) && \
     (defined(__i386) || defined(__i386__) || \
      defined(__x86_64) || defined(__x86_64__) || \
      defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
#  define GHASH_ASM_X86_OR_64

void gcm_init_clmul(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_clmul(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                     size_t len);

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define gcm_init_avx   gcm_init_clmul
#   define gcm_gmult_avx  gcm_gmult_clmul
#   define gcm_ghash_avx  gcm_ghash_clmul
#  else
void gcm_init_avx(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_avx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_avx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                   size_t len);
#  endif

#  if defined(__i386) || defined(__i386__) || defined(_M_IX86)
#   define GHASH_ASM_X86
void gcm_gmult_4bit_mmx(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_mmx(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);

void gcm_gmult_4bit_x86(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_4bit_x86(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                        size_t len);
#  endif
# elif defined(__arm__) || defined(__arm) || defined(__aarch64__)
#  include "arm_arch.h"
#  if __ARM_MAX_ARCH__>=7
#   define GHASH_ASM_ARM
#   define PMULL_CAPABLE        (OPENSSL_armcap_P & ARMV8_PMULL)
#   if defined(__arm__) || defined(__arm)
#    define NEON_CAPABLE        (OPENSSL_armcap_P & ARMV7_NEON)
#   endif
void gcm_init_neon(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
void gcm_init_v8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
#  endif
# elif defined(__sparc__) || defined(__sparc)
#  include "crypto/sparc_arch.h"
#  define GHASH_ASM_SPARC
void gcm_init_vis3(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_vis3(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_vis3(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                    size_t len);
# elif defined(OPENSSL_CPUID_OBJ) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC))
#  include "crypto/ppc_arch.h"
#  define GHASH_ASM_PPC
void gcm_init_p8(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 Htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
                  size_t len);
# elif defined(OPENSSL_CPUID_OBJ) && defined(__riscv) && __riscv_xlen == 64
#  include "crypto/riscv_arch.h"
#  define GHASH_ASM_RISCV
#  undef GHASH
void gcm_init_clmul_rv64i_zbb_zbc(u128 Htable[16], const u64 Xi[2]);
void gcm_gmult_clmul_rv64i_zbb_zbc(u64 Xi[2], const u128 Htable[16]);
# endif
#endif

static void gcm_get_funcs(struct gcm_funcs_st *ctx)
{
    /* set defaults -- overridden below as needed */
    ctx->ginit = gcm_init_4bit;
#if !defined(GHASH_ASM) || defined(INCLUDE_C_GMULT_4BIT)
    ctx->gmult = gcm_gmult_4bit;
#else
    ctx->gmult = NULL;
#endif
#if !defined(GHASH_ASM) && !defined(OPENSSL_SMALL_FOOTPRINT)
    ctx->ghash = gcm_ghash_4bit;
#else
    ctx->ghash = NULL;
#endif

#if defined(GHASH_ASM_X86_OR_64)
# if !defined(GHASH_ASM_X86) || defined(OPENSSL_IA32_SSE2)
    /* x86_64 */
    if (OPENSSL_ia32cap_P[1] & (1 << 1)) { /* check PCLMULQDQ bit */
        if (((OPENSSL_ia32cap_P[1] >> 22) & 0x41) == 0x41) { /* AVX+MOVBE */
            ctx->ginit = gcm_init_avx;
            ctx->gmult = gcm_gmult_avx;
            ctx->ghash = gcm_ghash_avx;
        } else {
            ctx->ginit = gcm_init_clmul;
            ctx->gmult = gcm_gmult_clmul;
            ctx->ghash = gcm_ghash_clmul;
        }
        return;
    }
# endif
# if defined(GHASH_ASM_X86)
    /* x86 only */
#  if defined(OPENSSL_IA32_SSE2)
    if (OPENSSL_ia32cap_P[0] & (1 << 25)) { /* check SSE bit */
        ctx->gmult = gcm_gmult_4bit_mmx;
        ctx->ghash = gcm_ghash_4bit_mmx;
        return;
    }
#  else
    if (OPENSSL_ia32cap_P[0] & (1 << 23)) { /* check MMX bit */
        ctx->gmult = gcm_gmult_4bit_mmx;
        ctx->ghash = gcm_ghash_4bit_mmx;
        return;
    }
#  endif
    ctx->gmult = gcm_gmult_4bit_x86;
    ctx->ghash = gcm_ghash_4bit_x86;
    return;
# else
    /* x86_64 fallback defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    return;
# endif
#elif defined(GHASH_ASM_ARM)
    /* ARM defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
# ifdef PMULL_CAPABLE
    if (PMULL_CAPABLE) {
        ctx->ginit = (gcm_init_fn)gcm_init_v8;
        ctx->gmult = gcm_gmult_v8;
        ctx->ghash = gcm_ghash_v8;
    }
# elif defined(NEON_CAPABLE)
    if (NEON_CAPABLE) {
        ctx->ginit = gcm_init_neon;
        ctx->gmult = gcm_gmult_neon;
        ctx->ghash = gcm_ghash_neon;
    }
# endif
    return;
#elif defined(GHASH_ASM_SPARC)
    /* SPARC defaults */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    if (OPENSSL_sparcv9cap_P[0] & SPARCV9_VIS3) {
        ctx->ginit = gcm_init_vis3;
        ctx->gmult = gcm_gmult_vis3;
        ctx->ghash = gcm_ghash_vis3;
    }
    return;
#elif defined(GHASH_ASM_PPC)
    /* PowerPC does not define GHASH_ASM; defaults set above */
    if (OPENSSL_ppccap_P & PPC_CRYPTO207) {
        ctx->ginit = gcm_init_p8;
        ctx->gmult = gcm_gmult_p8;
        ctx->ghash = gcm_ghash_p8;
    }
    return;
#elif defined(GHASH_ASM_RISCV) && __riscv_xlen == 64
    /* RISCV defaults; gmult already set above */
    ctx->ghash = NULL;
    if (RISCV_HAS_ZBB() && RISCV_HAS_ZBC()) {
        ctx->ginit = gcm_init_clmul_rv64i_zbb_zbc;
        ctx->gmult = gcm_gmult_clmul_rv64i_zbb_zbc;
    }
    return;
#elif defined(GHASH_ASM)
    /* all other architectures use the generic names */
    ctx->gmult = gcm_gmult_4bit;
    ctx->ghash = gcm_ghash_4bit;
    return;
#endif
}

void CRYPTO_gcm128_init(GCM128_CONTEXT *ctx, void *key, block128_f block)
{
    DECLARE_IS_ENDIAN;

    memset(ctx, 0, sizeof(*ctx));
    ctx->block = block;
    ctx->key = key;

    (*block) (ctx->H.c, ctx->H.c, key);

    if (IS_LITTLE_ENDIAN) {
        /* H is stored in host byte order */
#ifdef BSWAP8
        ctx->H.u[0] = BSWAP8(ctx->H.u[0]);
        ctx->H.u[1] = BSWAP8(ctx->H.u[1]);
#else
        u8 *p = ctx->H.c;
        u64 hi, lo;
        hi = (u64)GETU32(p) << 32 | GETU32(p + 4);
        lo = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
        ctx->H.u[0] = hi;
        ctx->H.u[1] = lo;
#endif
    }

    gcm_get_funcs(&ctx->funcs);
    ctx->funcs.ginit(ctx->Htable, ctx->H.u);
}
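
/*-
 * Minimal usage sketch for this low-level API (illustrative only; it
 * assumes the legacy AES_* interface from <openssl/aes.h>, whose
 * AES_encrypt matches the block128_f shape):
 *
 *     AES_KEY ks;
 *     GCM128_CONTEXT gcm;
 *
 *     AES_set_encrypt_key(key_bytes, 128, &ks);
 *     CRYPTO_gcm128_init(&gcm, &ks, (block128_f)AES_encrypt);
 *
 * After this call ctx->H holds E_K(0^128) in host byte order and Htable
 * has been filled in by the ginit routine selected by gcm_get_funcs().
 */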

void CRYPTO_gcm128_setiv(GCM128_CONTEXT *ctx, const unsigned char *iv,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int ctr;

    ctx->len.u[0] = 0;          /* AAD length */
    ctx->len.u[1] = 0;          /* message length */
    ctx->ares = 0;
    ctx->mres = 0;

    if (len == 12) {
        memcpy(ctx->Yi.c, iv, 12);
        ctx->Yi.c[12] = 0;
        ctx->Yi.c[13] = 0;
        ctx->Yi.c[14] = 0;
        ctx->Yi.c[15] = 1;
        ctr = 1;
    } else {
        size_t i;
        u64 len0 = len;

        /* Borrow ctx->Xi to calculate initial Yi */
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;

        while (len >= 16) {
            for (i = 0; i < 16; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
            iv += 16;
            len -= 16;
        }
        if (len) {
            for (i = 0; i < len; ++i)
                ctx->Xi.c[i] ^= iv[i];
            GCM_MUL(ctx);
        }
        len0 <<= 3;
        if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
            ctx->Xi.u[1] ^= BSWAP8(len0);
#else
            ctx->Xi.c[8] ^= (u8)(len0 >> 56);
            ctx->Xi.c[9] ^= (u8)(len0 >> 48);
            ctx->Xi.c[10] ^= (u8)(len0 >> 40);
            ctx->Xi.c[11] ^= (u8)(len0 >> 32);
            ctx->Xi.c[12] ^= (u8)(len0 >> 24);
            ctx->Xi.c[13] ^= (u8)(len0 >> 16);
            ctx->Xi.c[14] ^= (u8)(len0 >> 8);
            ctx->Xi.c[15] ^= (u8)(len0);
#endif
        } else {
            ctx->Xi.u[1] ^= len0;
        }

        GCM_MUL(ctx);

        if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
            ctr = BSWAP4(ctx->Xi.d[3]);
#else
            ctr = GETU32(ctx->Xi.c + 12);
#endif
        else
            ctr = ctx->Xi.d[3];

        /* Copy borrowed Xi to Yi */
        ctx->Yi.u[0] = ctx->Xi.u[0];
        ctx->Yi.u[1] = ctx->Xi.u[1];
    }

    ctx->Xi.u[0] = 0;
    ctx->Xi.u[1] = 0;

    (*ctx->block) (ctx->Yi.c, ctx->EK0.c, ctx->key);
    ++ctr;
    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctx->Yi.d[3] = BSWAP4(ctr);
#else
        PUTU32(ctx->Yi.c + 12, ctr);
#endif
    else
        ctx->Yi.d[3] = ctr;
}
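
/*-
 * What setiv computes (standard GCM J0 derivation, NIST SP 800-38D
 * section 7.1; summary wording): for the common 96-bit IV the initial
 * counter block is simply Y0 = IV || 0^31 || 1 and no field
 * multiplication is needed. For any other IV length,
 * Y0 = GHASH_H(IV padded to a 16-byte boundary || 0^64 ||
 * [len(IV) in bits]_64), which is what the Xi-borrowing loop above
 * evaluates. Either way EK0 = E_K(Y0) is cached for the final tag and
 * the counter word Yi.d[3] is stepped to Y1, the first block actually
 * used for encryption.
 */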

int CRYPTO_gcm128_aad(GCM128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    size_t i;
    unsigned int n;
    u64 alen = ctx->len.u[0];

    if (ctx->len.u[1])
        return -2;

    alen += len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->len.u[0] = alen;

    n = ctx->ares;
    if (n) {
        while (n && len) {
            ctx->Xi.c[n] ^= *(aad++);
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0)
            GCM_MUL(ctx);
        else {
            ctx->ares = n;
            return 0;
        }
    }
#ifdef GHASH
    if ((i = (len & (size_t)-16))) {
        GHASH(ctx, aad, i);
        aad += i;
        len -= i;
    }
#else
    while (len >= 16) {
        for (i = 0; i < 16; ++i)
            ctx->Xi.c[i] ^= aad[i];
        GCM_MUL(ctx);
        aad += 16;
        len -= 16;
    }
#endif
    if (len) {
        n = (unsigned int)len;
        for (i = 0; i < len; ++i)
            ctx->Xi.c[i] ^= aad[i];
    }

    ctx->ares = n;
    return 0;
}
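
/*-
 * Note on the bounds above (a summary of the checks, not upstream
 * commentary): GCM caps the AAD at 2^64 - 1 bits, so the byte count must
 * stay around 2^61; exceeding that, or wrapping the 64-bit accumulator,
 * returns -1. The -2 return enforces call order: all AAD must be
 * supplied before the first encrypt/decrypt call, since ctx->len.u[1]
 * becomes nonzero once message data has been processed.
 */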

int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                GHASH(ctx, out - GHASH_CHUNK, GHASH_CHUNK);
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                size_t j = i;

                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
                GHASH(ctx, out - j, j);
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i)
                    ctx->Xi.t[i] ^= out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    ctx->Xi.c[n] ^= out[n] = in[n] ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        ctx->Xn[mres++] = out[i] = in[i] ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx,ctx->Xn,sizeof(ctx->Xn));
            mres = 0;
        }
#else
        ctx->Xi.c[n] ^= out[i] = in[i] ^ ctx->EKi.c[n];
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}
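
/*-
 * End-to-end encryption sketch using only functions defined in this file
 * (illustrative; the buffers and AES key setup are assumed, not upstream
 * code):
 *
 *     unsigned char iv[12], aad[16], pt[64], ct[64], tag[16];
 *     AES_KEY ks;
 *     GCM128_CONTEXT gcm;
 *
 *     AES_set_encrypt_key(key_bytes, 128, &ks);
 *     CRYPTO_gcm128_init(&gcm, &ks, (block128_f)AES_encrypt);
 *     CRYPTO_gcm128_setiv(&gcm, iv, sizeof(iv));
 *     CRYPTO_gcm128_aad(&gcm, aad, sizeof(aad));
 *     CRYPTO_gcm128_encrypt(&gcm, pt, ct, sizeof(pt));
 *     CRYPTO_gcm128_tag(&gcm, tag, sizeof(tag));
 *
 * The mlen check at the top of this function enforces GCM's plaintext
 * bound of 2^39 - 256 bits, i.e. 2^36 - 32 bytes per IV.
 */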

int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    block128_f block = ctx->block;
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
#else
        ctr = GETU32(ctx->Yi.c + 12);
#endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
    if (16 % sizeof(size_t) == 0) { /* always true actually */
        do {
            if (n) {
# if defined(GHASH)
                while (n && len) {
                    *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GHASH(ctx, ctx->Xn, mres);
                    mres = 0;
                } else {
                    ctx->mres = mres;
                    return 0;
                }
# else
                while (n && len) {
                    u8 c = *(in++);
                    *(out++) = c ^ ctx->EKi.c[n];
                    ctx->Xi.c[n] ^= c;
                    --len;
                    n = (n + 1) % 16;
                }
                if (n == 0) {
                    GCM_MUL(ctx);
                    mres = 0;
                } else {
                    ctx->mres = n;
                    return 0;
                }
# endif
            }
# if defined(STRICT_ALIGNMENT)
            if (((size_t)in | (size_t)out) % sizeof(size_t) != 0)
                break;
# endif
# if defined(GHASH)
            if (len >= 16 && mres) {
                GHASH(ctx, ctx->Xn, mres);
                mres = 0;
            }
#  if defined(GHASH_CHUNK)
            while (len >= GHASH_CHUNK) {
                size_t j = GHASH_CHUNK;

                GHASH(ctx, in, GHASH_CHUNK);
                while (j) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#   else
                        PUTU32(ctx->Yi.c + 12, ctr);
#   endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    j -= 16;
                }
                len -= GHASH_CHUNK;
            }
#  endif
            if ((i = (len & (size_t)-16))) {
                GHASH(ctx, in, i);
                while (len >= 16) {
                    size_t_aX *out_t = (size_t_aX *)out;
                    const size_t_aX *in_t = (const size_t_aX *)in;

                    (*block) (ctx->Yi.c, ctx->EKi.c, key);
                    ++ctr;
                    if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                        ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                        PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                    else
                        ctx->Yi.d[3] = ctr;
                    for (i = 0; i < 16 / sizeof(size_t); ++i)
                        out_t[i] = in_t[i] ^ ctx->EKi.t[i];
                    out += 16;
                    in += 16;
                    len -= 16;
                }
            }
# else
            while (len >= 16) {
                size_t *out_t = (size_t *)out;
                const size_t *in_t = (const size_t *)in;

                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
#  ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
#  else
                    PUTU32(ctx->Yi.c + 12, ctr);
#  endif
                else
                    ctx->Yi.d[3] = ctr;
                for (i = 0; i < 16 / sizeof(size_t); ++i) {
                    size_t c = in_t[i];
                    out_t[i] = c ^ ctx->EKi.t[i];
                    ctx->Xi.t[i] ^= c;
                }
                GCM_MUL(ctx);
                out += 16;
                in += 16;
                len -= 16;
            }
# endif
            if (len) {
                (*block) (ctx->Yi.c, ctx->EKi.c, key);
                ++ctr;
                if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
                    ctx->Yi.d[3] = BSWAP4(ctr);
# else
                    PUTU32(ctx->Yi.c + 12, ctr);
# endif
                else
                    ctx->Yi.d[3] = ctr;
# if defined(GHASH)
                while (len--) {
                    out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
                    ++n;
                }
# else
                while (len--) {
                    u8 c = in[n];
                    ctx->Xi.c[n] ^= c;
                    out[n] = c ^ ctx->EKi.c[n];
                    ++n;
                }
                mres = n;
# endif
            }

            ctx->mres = mres;
            return 0;
        } while (0);
    }
#endif
    for (i = 0; i < len; ++i) {
        u8 c;
        if (n == 0) {
            (*block) (ctx->Yi.c, ctx->EKi.c, key);
            ++ctr;
            if (IS_LITTLE_ENDIAN)
#ifdef BSWAP4
                ctx->Yi.d[3] = BSWAP4(ctr);
#else
                PUTU32(ctx->Yi.c + 12, ctr);
#endif
            else
                ctx->Yi.d[3] = ctr;
        }
#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
        out[i] = (ctx->Xn[mres++] = c = in[i]) ^ ctx->EKi.c[n];
        n = (n + 1) % 16;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx,ctx->Xn,sizeof(ctx->Xn));
            mres = 0;
        }
#else
        c = in[i];
        out[i] = c ^ ctx->EKi.c[n];
        ctx->Xi.c[n] ^= c;
        mres = n = (n + 1) % 16;
        if (n == 0)
            GCM_MUL(ctx);
#endif
    }

    ctx->mres = mres;
    return 0;
}
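
/*-
 * Matching decryption sketch (illustrative; same assumptions as the
 * encryption example above). Verification goes through
 * CRYPTO_gcm128_finish(), which returns 0 only if the supplied tag
 * matches:
 *
 *     CRYPTO_gcm128_setiv(&gcm, iv, sizeof(iv));
 *     CRYPTO_gcm128_aad(&gcm, aad, sizeof(aad));
 *     CRYPTO_gcm128_decrypt(&gcm, ct, pt, sizeof(ct));
 *     if (CRYPTO_gcm128_finish(&gcm, tag, 16) != 0)
 *         return -1;      <- authentication failed, discard plaintext
 */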

int CRYPTO_gcm128_encrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_encrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to encrypt finalizes GHASH(AAD) */
#if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
#else
        GCM_MUL(ctx);
#endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            ctx->Xn[mres++] = *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            ctx->Xi.c[n] ^= *(out++) = *(in++) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        GHASH(ctx, out, GHASH_CHUNK);
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        in += i;
        len -= i;
# if defined(GHASH)
        GHASH(ctx, out, i);
        out += i;
# else
        while (j--) {
            for (i = 0; i < 16; ++i)
                ctx->Xi.c[i] ^= out[i];
            GCM_MUL(ctx);
            out += 16;
        }
# endif
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            ctx->Xn[mres++] = out[n] = in[n] ^ ctx->EKi.c[n];
# else
            ctx->Xi.c[mres++] ^= out[n] = in[n] ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}
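
/*-
 * Note on the stream parameter (the ctr128_f typedef lives in
 * crypto/modes.h; restated here as a reading aid):
 *
 *     void (*ctr128_f) (const unsigned char *in, unsigned char *out,
 *                       size_t blocks, const void *key,
 *                       const unsigned char ivec[16]);
 *
 * A conforming stream encrypts `blocks` 16-byte blocks in CTR mode,
 * treating only the last 32 bits of ivec as a big-endian counter. That
 * contract is why the code above adds GHASH_CHUNK/16 or j to ctr and
 * rewrites Yi.d[3] itself after each call instead of reloading the
 * whole counter block.
 */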

int CRYPTO_gcm128_decrypt_ctr32(GCM128_CONTEXT *ctx,
                                const unsigned char *in, unsigned char *out,
                                size_t len, ctr128_f stream)
{
#if defined(OPENSSL_SMALL_FOOTPRINT)
    return CRYPTO_gcm128_decrypt(ctx, in, out, len);
#else
    DECLARE_IS_ENDIAN;
    unsigned int n, ctr, mres;
    size_t i;
    u64 mlen = ctx->len.u[1];
    void *key = ctx->key;

    mlen += len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->len.u[1] = mlen;

    mres = ctx->mres;

    if (ctx->ares) {
        /* First call to decrypt finalizes GHASH(AAD) */
# if defined(GHASH)
        if (len == 0) {
            GCM_MUL(ctx);
            ctx->ares = 0;
            return 0;
        }
        memcpy(ctx->Xn, ctx->Xi.c, sizeof(ctx->Xi));
        ctx->Xi.u[0] = 0;
        ctx->Xi.u[1] = 0;
        mres = sizeof(ctx->Xi);
# else
        GCM_MUL(ctx);
# endif
        ctx->ares = 0;
    }

    if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
        ctr = BSWAP4(ctx->Yi.d[3]);
# else
        ctr = GETU32(ctx->Yi.c + 12);
# endif
    else
        ctr = ctx->Yi.d[3];

    n = mres % 16;
    if (n) {
# if defined(GHASH)
        while (n && len) {
            *(out++) = (ctx->Xn[mres++] = *(in++)) ^ ctx->EKi.c[n];
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        } else {
            ctx->mres = mres;
            return 0;
        }
# else
        while (n && len) {
            u8 c = *(in++);
            *(out++) = c ^ ctx->EKi.c[n];
            ctx->Xi.c[n] ^= c;
            --len;
            n = (n + 1) % 16;
        }
        if (n == 0) {
            GCM_MUL(ctx);
            mres = 0;
        } else {
            ctx->mres = n;
            return 0;
        }
# endif
    }
# if defined(GHASH)
    if (len >= 16 && mres) {
        GHASH(ctx, ctx->Xn, mres);
        mres = 0;
    }
#  if defined(GHASH_CHUNK)
    while (len >= GHASH_CHUNK) {
        GHASH(ctx, in, GHASH_CHUNK);
        (*stream) (in, out, GHASH_CHUNK / 16, key, ctx->Yi.c);
        ctr += GHASH_CHUNK / 16;
        if (IS_LITTLE_ENDIAN)
#   ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
#   else
            PUTU32(ctx->Yi.c + 12, ctr);
#   endif
        else
            ctx->Yi.d[3] = ctr;
        out += GHASH_CHUNK;
        in += GHASH_CHUNK;
        len -= GHASH_CHUNK;
    }
#  endif
# endif
    if ((i = (len & (size_t)-16))) {
        size_t j = i / 16;

# if defined(GHASH)
        GHASH(ctx, in, i);
# else
        while (j--) {
            size_t k;
            for (k = 0; k < 16; ++k)
                ctx->Xi.c[k] ^= in[k];
            GCM_MUL(ctx);
            in += 16;
        }
        j = i / 16;
        in -= i;
# endif
        (*stream) (in, out, j, key, ctx->Yi.c);
        ctr += (unsigned int)j;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        out += i;
        in += i;
        len -= i;
    }
    if (len) {
        (*ctx->block) (ctx->Yi.c, ctx->EKi.c, key);
        ++ctr;
        if (IS_LITTLE_ENDIAN)
# ifdef BSWAP4
            ctx->Yi.d[3] = BSWAP4(ctr);
# else
            PUTU32(ctx->Yi.c + 12, ctr);
# endif
        else
            ctx->Yi.d[3] = ctr;
        while (len--) {
# if defined(GHASH)
            out[n] = (ctx->Xn[mres++] = in[n]) ^ ctx->EKi.c[n];
# else
            u8 c = in[n];
            ctx->Xi.c[mres++] ^= c;
            out[n] = c ^ ctx->EKi.c[n];
# endif
            ++n;
        }
    }

    ctx->mres = mres;
    return 0;
#endif
}

int CRYPTO_gcm128_finish(GCM128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    DECLARE_IS_ENDIAN;
    u64 alen = ctx->len.u[0] << 3;
    u64 clen = ctx->len.u[1] << 3;

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    u128 bitlen;
    unsigned int mres = ctx->mres;

    if (mres) {
        unsigned blocks = (mres + 15) & -16;

        memset(ctx->Xn + mres, 0, blocks - mres);
        mres = blocks;
        if (mres == sizeof(ctx->Xn)) {
            GHASH(ctx, ctx->Xn, mres);
            mres = 0;
        }
    } else if (ctx->ares) {
        GCM_MUL(ctx);
    }
#else
    if (ctx->mres || ctx->ares)
        GCM_MUL(ctx);
#endif

    if (IS_LITTLE_ENDIAN) {
#ifdef BSWAP8
        alen = BSWAP8(alen);
        clen = BSWAP8(clen);
#else
        u8 *p = ctx->len.c;

        ctx->len.u[0] = alen;
        ctx->len.u[1] = clen;

        alen = (u64)GETU32(p) << 32 | GETU32(p + 4);
        clen = (u64)GETU32(p + 8) << 32 | GETU32(p + 12);
#endif
    }

#if defined(GHASH) && !defined(OPENSSL_SMALL_FOOTPRINT)
    bitlen.hi = alen;
    bitlen.lo = clen;
    memcpy(ctx->Xn + mres, &bitlen, sizeof(bitlen));
    mres += sizeof(bitlen);
    GHASH(ctx, ctx->Xn, mres);
#else
    ctx->Xi.u[0] ^= alen;
    ctx->Xi.u[1] ^= clen;
    GCM_MUL(ctx);
#endif

    ctx->Xi.u[0] ^= ctx->EK0.u[0];
    ctx->Xi.u[1] ^= ctx->EK0.u[1];

    if (tag && len <= sizeof(ctx->Xi))
        return CRYPTO_memcmp(ctx->Xi.c, tag, len);
    else
        return -1;
}
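
/*-
 * Tag derivation recap (standard GCM, NIST SP 800-38D; a summary, not
 * upstream commentary): once the length block [len(A)]_64 || [len(C)]_64
 * has been folded in, Xi holds S = GHASH_H(A || pad || C || pad ||
 * lengths), and the tag is T = E_K(Y0) ^ S, which is the EK0 XOR
 * performed just above. CRYPTO_memcmp() compares in constant time, so
 * callers can return its result directly without leaking where a forged
 * tag first differs.
 */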

void CRYPTO_gcm128_tag(GCM128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    CRYPTO_gcm128_finish(ctx, NULL, 0);
    memcpy(tag, ctx->Xi.c,
           len <= sizeof(ctx->Xi.c) ? len : sizeof(ctx->Xi.c));
}

GCM128_CONTEXT *CRYPTO_gcm128_new(void *key, block128_f block)
{
    GCM128_CONTEXT *ret;

    if ((ret = OPENSSL_malloc(sizeof(*ret))) != NULL)
        CRYPTO_gcm128_init(ret, key, block);

    return ret;
}

void CRYPTO_gcm128_release(GCM128_CONTEXT *ctx)
{
    OPENSSL_clear_free(ctx, sizeof(*ctx));
}
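
/*-
 * Heap-based variant of the usage sketches above (illustrative):
 *
 *     GCM128_CONTEXT *gcm = CRYPTO_gcm128_new(&ks, (block128_f)AES_encrypt);
 *
 *     if (gcm != NULL) {
 *         ... setiv/aad/encrypt/finish as before ...
 *         CRYPTO_gcm128_release(gcm);   <- zeroizes the context, then frees
 *     }
 */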