#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_KECCAK
#define SPH_SMALL_FOOTPRINT_KECCAK   1
#endif

#if !defined SPH_KECCAK_64 && SPH_64 \
    && !(defined __i386__ || SPH_I386_GCC || SPH_I386_MSVC)
#define SPH_KECCAK_64   1
#endif

#if !SPH_KECCAK_64 && !defined SPH_KECCAK_INTERLEAVE
#define SPH_KECCAK_INTERLEAVE   1
#endif

#ifndef SPH_KECCAK_UNROLL
#if SPH_SMALL_FOOTPRINT_KECCAK
#define SPH_KECCAK_UNROLL   2
#else
#define SPH_KECCAK_UNROLL   8
#endif
#endif

#ifndef SPH_KECCAK_NOCOPY
#if defined __i386__ || defined __x86_64 || SPH_I386_MSVC || SPH_I386_GCC
#define SPH_KECCAK_NOCOPY   1
#else
#define SPH_KECCAK_NOCOPY   0
#endif
#endif

#ifdef _MSC_VER
#pragma warning (disable: 4146)
#endif

#if SPH_KECCAK_64

static const sph_u64 RC[] = {
    SPH_C64(0x0000000000000001), SPH_C64(0x0000000000008082),
    SPH_C64(0x800000000000808A), SPH_C64(0x8000000080008000),
    SPH_C64(0x000000000000808B), SPH_C64(0x0000000080000001),
    SPH_C64(0x8000000080008081), SPH_C64(0x8000000000008009),
    SPH_C64(0x000000000000008A), SPH_C64(0x0000000000000088),
    SPH_C64(0x0000000080008009), SPH_C64(0x000000008000000A),
    SPH_C64(0x000000008000808B), SPH_C64(0x800000000000008B),
    SPH_C64(0x8000000000008089), SPH_C64(0x8000000000008003),
    SPH_C64(0x8000000000008002), SPH_C64(0x8000000000000080),
    SPH_C64(0x000000000000800A), SPH_C64(0x800000008000000A),
    SPH_C64(0x8000000080008081), SPH_C64(0x8000000000008080),
    SPH_C64(0x0000000080000001), SPH_C64(0x8000000080008008)
};
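
/*
 * The table above lists the 24 round constants of Keccak-f[1600]; the
 * "iota" step XORs RC[r] into lane (0, 0) at the end of round r. What
 * follows is the 64-bit implementation of the state: with
 * SPH_KECCAK_NOCOPY, the lanes are accessed directly within the context
 * structure; otherwise they are copied into local variables around each
 * processed block.
 */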
#if SPH_KECCAK_NOCOPY

#define a00   (kc->u.wide[ 0])
#define a10   (kc->u.wide[ 1])
#define a20   (kc->u.wide[ 2])
#define a30   (kc->u.wide[ 3])
#define a40   (kc->u.wide[ 4])
#define a01   (kc->u.wide[ 5])
#define a11   (kc->u.wide[ 6])
#define a21   (kc->u.wide[ 7])
#define a31   (kc->u.wide[ 8])
#define a41   (kc->u.wide[ 9])
#define a02   (kc->u.wide[10])
#define a12   (kc->u.wide[11])
#define a22   (kc->u.wide[12])
#define a32   (kc->u.wide[13])
#define a42   (kc->u.wide[14])
#define a03   (kc->u.wide[15])
#define a13   (kc->u.wide[16])
#define a23   (kc->u.wide[17])
#define a33   (kc->u.wide[18])
#define a43   (kc->u.wide[19])
#define a04   (kc->u.wide[20])
#define a14   (kc->u.wide[21])
#define a24   (kc->u.wide[22])
#define a34   (kc->u.wide[23])
#define a44   (kc->u.wide[24])

#define DECL_STATE
#define READ_STATE(sc)
#define WRITE_STATE(sc)

#define INPUT_BUF(size)   do { \
        size_t j; \
        for (j = 0; j < (size); j += 8) { \
            kc->u.wide[j >> 3] ^= sph_dec64le_aligned(buf + j); \
        } \
    } while (0)

#define INPUT_BUF144   INPUT_BUF(144)
#define INPUT_BUF136   INPUT_BUF(136)
#define INPUT_BUF104   INPUT_BUF(104)
#define INPUT_BUF72    INPUT_BUF(72)

#else

#define DECL_STATE \
    sph_u64 a00, a01, a02, a03, a04; \
    sph_u64 a10, a11, a12, a13, a14; \
    sph_u64 a20, a21, a22, a23, a24; \
    sph_u64 a30, a31, a32, a33, a34; \
    sph_u64 a40, a41, a42, a43, a44;

#define READ_STATE(state)   do { \
        a00 = (state)->u.wide[ 0]; \
        a10 = (state)->u.wide[ 1]; \
        a20 = (state)->u.wide[ 2]; \
        a30 = (state)->u.wide[ 3]; \
        a40 = (state)->u.wide[ 4]; \
        a01 = (state)->u.wide[ 5]; \
        a11 = (state)->u.wide[ 6]; \
        a21 = (state)->u.wide[ 7]; \
        a31 = (state)->u.wide[ 8]; \
        a41 = (state)->u.wide[ 9]; \
        a02 = (state)->u.wide[10]; \
        a12 = (state)->u.wide[11]; \
        a22 = (state)->u.wide[12]; \
        a32 = (state)->u.wide[13]; \
        a42 = (state)->u.wide[14]; \
        a03 = (state)->u.wide[15]; \
        a13 = (state)->u.wide[16]; \
        a23 = (state)->u.wide[17]; \
        a33 = (state)->u.wide[18]; \
        a43 = (state)->u.wide[19]; \
        a04 = (state)->u.wide[20]; \
        a14 = (state)->u.wide[21]; \
        a24 = (state)->u.wide[22]; \
        a34 = (state)->u.wide[23]; \
        a44 = (state)->u.wide[24]; \
    } while (0)

#define WRITE_STATE(state)   do { \
        (state)->u.wide[ 0] = a00; \
        (state)->u.wide[ 1] = a10; \
        (state)->u.wide[ 2] = a20; \
        (state)->u.wide[ 3] = a30; \
        (state)->u.wide[ 4] = a40; \
        (state)->u.wide[ 5] = a01; \
        (state)->u.wide[ 6] = a11; \
        (state)->u.wide[ 7] = a21; \
        (state)->u.wide[ 8] = a31; \
        (state)->u.wide[ 9] = a41; \
        (state)->u.wide[10] = a02; \
        (state)->u.wide[11] = a12; \
        (state)->u.wide[12] = a22; \
        (state)->u.wide[13] = a32; \
        (state)->u.wide[14] = a42; \
        (state)->u.wide[15] = a03; \
        (state)->u.wide[16] = a13; \
        (state)->u.wide[17] = a23; \
        (state)->u.wide[18] = a33; \
        (state)->u.wide[19] = a43; \
        (state)->u.wide[20] = a04; \
        (state)->u.wide[21] = a14; \
        (state)->u.wide[22] = a24; \
        (state)->u.wide[23] = a34; \
        (state)->u.wide[24] = a44; \
    } while (0)
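
/*
 * INPUT_BUF XORs the next message block into the first lim/8 lanes
 * (the "rate" part of the state); lane bytes are read in little-endian
 * order, as specified for Keccak.
 */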
#define INPUT_BUF144   do { \
        a00 ^= sph_dec64le_aligned(buf +   0); \
        a10 ^= sph_dec64le_aligned(buf +   8); \
        a20 ^= sph_dec64le_aligned(buf +  16); \
        a30 ^= sph_dec64le_aligned(buf +  24); \
        a40 ^= sph_dec64le_aligned(buf +  32); \
        a01 ^= sph_dec64le_aligned(buf +  40); \
        a11 ^= sph_dec64le_aligned(buf +  48); \
        a21 ^= sph_dec64le_aligned(buf +  56); \
        a31 ^= sph_dec64le_aligned(buf +  64); \
        a41 ^= sph_dec64le_aligned(buf +  72); \
        a02 ^= sph_dec64le_aligned(buf +  80); \
        a12 ^= sph_dec64le_aligned(buf +  88); \
        a22 ^= sph_dec64le_aligned(buf +  96); \
        a32 ^= sph_dec64le_aligned(buf + 104); \
        a42 ^= sph_dec64le_aligned(buf + 112); \
        a03 ^= sph_dec64le_aligned(buf + 120); \
        a13 ^= sph_dec64le_aligned(buf + 128); \
        a23 ^= sph_dec64le_aligned(buf + 136); \
    } while (0)

#define INPUT_BUF136   do { \
        a00 ^= sph_dec64le_aligned(buf +   0); \
        a10 ^= sph_dec64le_aligned(buf +   8); \
        a20 ^= sph_dec64le_aligned(buf +  16); \
        a30 ^= sph_dec64le_aligned(buf +  24); \
        a40 ^= sph_dec64le_aligned(buf +  32); \
        a01 ^= sph_dec64le_aligned(buf +  40); \
        a11 ^= sph_dec64le_aligned(buf +  48); \
        a21 ^= sph_dec64le_aligned(buf +  56); \
        a31 ^= sph_dec64le_aligned(buf +  64); \
        a41 ^= sph_dec64le_aligned(buf +  72); \
        a02 ^= sph_dec64le_aligned(buf +  80); \
        a12 ^= sph_dec64le_aligned(buf +  88); \
        a22 ^= sph_dec64le_aligned(buf +  96); \
        a32 ^= sph_dec64le_aligned(buf + 104); \
        a42 ^= sph_dec64le_aligned(buf + 112); \
        a03 ^= sph_dec64le_aligned(buf + 120); \
        a13 ^= sph_dec64le_aligned(buf + 128); \
    } while (0)

#define INPUT_BUF104   do { \
        a00 ^= sph_dec64le_aligned(buf +   0); \
        a10 ^= sph_dec64le_aligned(buf +   8); \
        a20 ^= sph_dec64le_aligned(buf +  16); \
        a30 ^= sph_dec64le_aligned(buf +  24); \
        a40 ^= sph_dec64le_aligned(buf +  32); \
        a01 ^= sph_dec64le_aligned(buf +  40); \
        a11 ^= sph_dec64le_aligned(buf +  48); \
        a21 ^= sph_dec64le_aligned(buf +  56); \
        a31 ^= sph_dec64le_aligned(buf +  64); \
        a41 ^= sph_dec64le_aligned(buf +  72); \
        a02 ^= sph_dec64le_aligned(buf +  80); \
        a12 ^= sph_dec64le_aligned(buf +  88); \
        a22 ^= sph_dec64le_aligned(buf +  96); \
    } while (0)

#define INPUT_BUF72   do { \
        a00 ^= sph_dec64le_aligned(buf +   0); \
        a10 ^= sph_dec64le_aligned(buf +   8); \
        a20 ^= sph_dec64le_aligned(buf +  16); \
        a30 ^= sph_dec64le_aligned(buf +  24); \
        a40 ^= sph_dec64le_aligned(buf +  32); \
        a01 ^= sph_dec64le_aligned(buf +  40); \
        a11 ^= sph_dec64le_aligned(buf +  48); \
        a21 ^= sph_dec64le_aligned(buf +  56); \
        a31 ^= sph_dec64le_aligned(buf +  64); \
    } while (0)

#define INPUT_BUF(lim)   do { \
        a00 ^= sph_dec64le_aligned(buf +   0); \
        a10 ^= sph_dec64le_aligned(buf +   8); \
        a20 ^= sph_dec64le_aligned(buf +  16); \
        a30 ^= sph_dec64le_aligned(buf +  24); \
        a40 ^= sph_dec64le_aligned(buf +  32); \
        a01 ^= sph_dec64le_aligned(buf +  40); \
        a11 ^= sph_dec64le_aligned(buf +  48); \
        a21 ^= sph_dec64le_aligned(buf +  56); \
        a31 ^= sph_dec64le_aligned(buf +  64); \
        if ((lim) == 72) \
            break; \
        a41 ^= sph_dec64le_aligned(buf +  72); \
        a02 ^= sph_dec64le_aligned(buf +  80); \
        a12 ^= sph_dec64le_aligned(buf +  88); \
        a22 ^= sph_dec64le_aligned(buf +  96); \
        if ((lim) == 104) \
            break; \
        a32 ^= sph_dec64le_aligned(buf + 104); \
        a42 ^= sph_dec64le_aligned(buf + 112); \
        a03 ^= sph_dec64le_aligned(buf + 120); \
        a13 ^= sph_dec64le_aligned(buf + 128); \
        if ((lim) == 136) \
            break; \
        a23 ^= sph_dec64le_aligned(buf + 136); \
    } while (0)

#endif

#define DECL64(x)        sph_u64 x
#define MOV64(d, s)      (d = s)
#define XOR64(d, a, b)   (d = a ^ b)
#define AND64(d, a, b)   (d = a & b)
#define OR64(d, a, b)    (d = a | b)
#define NOT64(d, s)      (d = SPH_T64(~s))
#define ROL64(d, v, n)   (d = SPH_ROTL64(v, n))
#define XOR64_IOTA       XOR64

#else
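
/*
 * 32-bit implementation. Each 64-bit lane is split into two 32-bit
 * words. With SPH_KECCAK_INTERLEAVE, the split is by bit parity (even
 * bits in one word, odd bits in the other), so that a 64-bit rotation
 * becomes two independent 32-bit rotations; see the INTERLEAVE and
 * ROL64_* macros below.
 */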
static const struct {
    sph_u32 high, low;
} RC[] = {
#if SPH_KECCAK_INTERLEAVE
    { SPH_C32(0x00000000), SPH_C32(0x00000001) },
    { SPH_C32(0x00000089), SPH_C32(0x00000000) },
    { SPH_C32(0x8000008B), SPH_C32(0x00000000) },
    { SPH_C32(0x80008080), SPH_C32(0x00000000) },
    { SPH_C32(0x0000008B), SPH_C32(0x00000001) },
    { SPH_C32(0x00008000), SPH_C32(0x00000001) },
    { SPH_C32(0x80008088), SPH_C32(0x00000001) },
    { SPH_C32(0x80000082), SPH_C32(0x00000001) },
    { SPH_C32(0x0000000B), SPH_C32(0x00000000) },
    { SPH_C32(0x0000000A), SPH_C32(0x00000000) },
    { SPH_C32(0x00008082), SPH_C32(0x00000001) },
    { SPH_C32(0x00008003), SPH_C32(0x00000000) },
    { SPH_C32(0x0000808B), SPH_C32(0x00000001) },
    { SPH_C32(0x8000000B), SPH_C32(0x00000001) },
    { SPH_C32(0x8000008A), SPH_C32(0x00000001) },
    { SPH_C32(0x80000081), SPH_C32(0x00000001) },
    { SPH_C32(0x80000081), SPH_C32(0x00000000) },
    { SPH_C32(0x80000008), SPH_C32(0x00000000) },
    { SPH_C32(0x00000083), SPH_C32(0x00000000) },
    { SPH_C32(0x80008003), SPH_C32(0x00000000) },
    { SPH_C32(0x80008088), SPH_C32(0x00000001) },
    { SPH_C32(0x80000088), SPH_C32(0x00000000) },
    { SPH_C32(0x00008000), SPH_C32(0x00000001) },
    { SPH_C32(0x80008082), SPH_C32(0x00000000) }
#else
    { SPH_C32(0x00000000), SPH_C32(0x00000001) },
    { SPH_C32(0x00000000), SPH_C32(0x00008082) },
    { SPH_C32(0x80000000), SPH_C32(0x0000808A) },
    { SPH_C32(0x80000000), SPH_C32(0x80008000) },
    { SPH_C32(0x00000000), SPH_C32(0x0000808B) },
    { SPH_C32(0x00000000), SPH_C32(0x80000001) },
    { SPH_C32(0x80000000), SPH_C32(0x80008081) },
    { SPH_C32(0x80000000), SPH_C32(0x00008009) },
    { SPH_C32(0x00000000), SPH_C32(0x0000008A) },
    { SPH_C32(0x00000000), SPH_C32(0x00000088) },
    { SPH_C32(0x00000000), SPH_C32(0x80008009) },
    { SPH_C32(0x00000000), SPH_C32(0x8000000A) },
    { SPH_C32(0x00000000), SPH_C32(0x8000808B) },
    { SPH_C32(0x80000000), SPH_C32(0x0000008B) },
    { SPH_C32(0x80000000), SPH_C32(0x00008089) },
    { SPH_C32(0x80000000), SPH_C32(0x00008003) },
    { SPH_C32(0x80000000), SPH_C32(0x00008002) },
    { SPH_C32(0x80000000), SPH_C32(0x00000080) },
    { SPH_C32(0x00000000), SPH_C32(0x0000800A) },
    { SPH_C32(0x80000000), SPH_C32(0x8000000A) },
    { SPH_C32(0x80000000), SPH_C32(0x80008081) },
    { SPH_C32(0x80000000), SPH_C32(0x00008080) },
    { SPH_C32(0x00000000), SPH_C32(0x80000001) },
    { SPH_C32(0x80000000), SPH_C32(0x80008008) }
#endif
};

#if SPH_KECCAK_INTERLEAVE

#define INTERLEAVE(xl, xh)   do { \
        sph_u32 l, h, t; \
        l = (xl); h = (xh); \
        t = (l ^ (l >> 1)) & SPH_C32(0x22222222); l ^= t ^ (t << 1); \
        t = (h ^ (h >> 1)) & SPH_C32(0x22222222); h ^= t ^ (t << 1); \
        t = (l ^ (l >> 2)) & SPH_C32(0x0C0C0C0C); l ^= t ^ (t << 2); \
        t = (h ^ (h >> 2)) & SPH_C32(0x0C0C0C0C); h ^= t ^ (t << 2); \
        t = (l ^ (l >> 4)) & SPH_C32(0x00F000F0); l ^= t ^ (t << 4); \
        t = (h ^ (h >> 4)) & SPH_C32(0x00F000F0); h ^= t ^ (t << 4); \
        t = (l ^ (l >> 8)) & SPH_C32(0x0000FF00); l ^= t ^ (t << 8); \
        t = (h ^ (h >> 8)) & SPH_C32(0x0000FF00); h ^= t ^ (t << 8); \
        t = (l ^ SPH_T32(h << 16)) & SPH_C32(0xFFFF0000); \
        l ^= t; h ^= t >> 16; \
        (xl) = l; (xh) = h; \
    } while (0)

#define UNINTERLEAVE(xl, xh)   do { \
        sph_u32 l, h, t; \
        l = (xl); h = (xh); \
        t = (l ^ SPH_T32(h << 16)) & SPH_C32(0xFFFF0000); \
        l ^= t; h ^= t >> 16; \
        t = (l ^ (l >> 8)) & SPH_C32(0x0000FF00); l ^= t ^ (t << 8); \
        t = (h ^ (h >> 8)) & SPH_C32(0x0000FF00); h ^= t ^ (t << 8); \
        t = (l ^ (l >> 4)) & SPH_C32(0x00F000F0); l ^= t ^ (t << 4); \
        t = (h ^ (h >> 4)) & SPH_C32(0x00F000F0); h ^= t ^ (t << 4); \
        t = (l ^ (l >> 2)) & SPH_C32(0x0C0C0C0C); l ^= t ^ (t << 2); \
        t = (h ^ (h >> 2)) & SPH_C32(0x0C0C0C0C); h ^= t ^ (t << 2); \
        t = (l ^ (l >> 1)) & SPH_C32(0x22222222); l ^= t ^ (t << 1); \
        t = (h ^ (h >> 1)) & SPH_C32(0x22222222); h ^= t ^ (t << 1); \
        (xl) = l; (xh) = h; \
    } while (0)

#else

#define INTERLEAVE(l, h)
#define UNINTERLEAVE(l, h)

#endif
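
/*
 * Lane i (in the lane ordering used throughout this file) is stored as
 * two 32-bit words: u.narrow[2 * i] holds the "low" half and
 * u.narrow[2 * i + 1] the "high" half (even and odd bits, respectively,
 * when interleaving is used).
 */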
#if SPH_KECCAK_NOCOPY

#define a00l   (kc->u.narrow[2 * 0 + 0])
#define a00h   (kc->u.narrow[2 * 0 + 1])
#define a10l   (kc->u.narrow[2 * 1 + 0])
#define a10h   (kc->u.narrow[2 * 1 + 1])
#define a20l   (kc->u.narrow[2 * 2 + 0])
#define a20h   (kc->u.narrow[2 * 2 + 1])
#define a30l   (kc->u.narrow[2 * 3 + 0])
#define a30h   (kc->u.narrow[2 * 3 + 1])
#define a40l   (kc->u.narrow[2 * 4 + 0])
#define a40h   (kc->u.narrow[2 * 4 + 1])
#define a01l   (kc->u.narrow[2 * 5 + 0])
#define a01h   (kc->u.narrow[2 * 5 + 1])
#define a11l   (kc->u.narrow[2 * 6 + 0])
#define a11h   (kc->u.narrow[2 * 6 + 1])
#define a21l   (kc->u.narrow[2 * 7 + 0])
#define a21h   (kc->u.narrow[2 * 7 + 1])
#define a31l   (kc->u.narrow[2 * 8 + 0])
#define a31h   (kc->u.narrow[2 * 8 + 1])
#define a41l   (kc->u.narrow[2 * 9 + 0])
#define a41h   (kc->u.narrow[2 * 9 + 1])
#define a02l   (kc->u.narrow[2 * 10 + 0])
#define a02h   (kc->u.narrow[2 * 10 + 1])
#define a12l   (kc->u.narrow[2 * 11 + 0])
#define a12h   (kc->u.narrow[2 * 11 + 1])
#define a22l   (kc->u.narrow[2 * 12 + 0])
#define a22h   (kc->u.narrow[2 * 12 + 1])
#define a32l   (kc->u.narrow[2 * 13 + 0])
#define a32h   (kc->u.narrow[2 * 13 + 1])
#define a42l   (kc->u.narrow[2 * 14 + 0])
#define a42h   (kc->u.narrow[2 * 14 + 1])
#define a03l   (kc->u.narrow[2 * 15 + 0])
#define a03h   (kc->u.narrow[2 * 15 + 1])
#define a13l   (kc->u.narrow[2 * 16 + 0])
#define a13h   (kc->u.narrow[2 * 16 + 1])
#define a23l   (kc->u.narrow[2 * 17 + 0])
#define a23h   (kc->u.narrow[2 * 17 + 1])
#define a33l   (kc->u.narrow[2 * 18 + 0])
#define a33h   (kc->u.narrow[2 * 18 + 1])
#define a43l   (kc->u.narrow[2 * 19 + 0])
#define a43h   (kc->u.narrow[2 * 19 + 1])
#define a04l   (kc->u.narrow[2 * 20 + 0])
#define a04h   (kc->u.narrow[2 * 20 + 1])
#define a14l   (kc->u.narrow[2 * 21 + 0])
#define a14h   (kc->u.narrow[2 * 21 + 1])
#define a24l   (kc->u.narrow[2 * 22 + 0])
#define a24h   (kc->u.narrow[2 * 22 + 1])
#define a34l   (kc->u.narrow[2 * 23 + 0])
#define a34h   (kc->u.narrow[2 * 23 + 1])
#define a44l   (kc->u.narrow[2 * 24 + 0])
#define a44h   (kc->u.narrow[2 * 24 + 1])

#define DECL_STATE
#define READ_STATE(state)
#define WRITE_STATE(state)

#define INPUT_BUF(size)   do { \
        size_t j; \
        for (j = 0; j < (size); j += 8) { \
            sph_u32 tl, th; \
            tl = sph_dec32le_aligned(buf + j + 0); \
            th = sph_dec32le_aligned(buf + j + 4); \
            INTERLEAVE(tl, th); \
            kc->u.narrow[(j >> 2) + 0] ^= tl; \
            kc->u.narrow[(j >> 2) + 1] ^= th; \
        } \
    } while (0)

#define INPUT_BUF144   INPUT_BUF(144)
#define INPUT_BUF136   INPUT_BUF(136)
#define INPUT_BUF104   INPUT_BUF(104)
#define INPUT_BUF72    INPUT_BUF(72)

#else

#define DECL_STATE \
    sph_u32 a00l, a00h, a01l, a01h, a02l, a02h, a03l, a03h, a04l, a04h; \
    sph_u32 a10l, a10h, a11l, a11h, a12l, a12h, a13l, a13h, a14l, a14h; \
    sph_u32 a20l, a20h, a21l, a21h, a22l, a22h, a23l, a23h, a24l, a24h; \
    sph_u32 a30l, a30h, a31l, a31h, a32l, a32h, a33l, a33h, a34l, a34h; \
    sph_u32 a40l, a40h, a41l, a41h, a42l, a42h, a43l, a43h, a44l, a44h;

#define READ_STATE(state)   do { \
        a00l = (state)->u.narrow[2 * 0 + 0]; \
        a00h = (state)->u.narrow[2 * 0 + 1]; \
        a10l = (state)->u.narrow[2 * 1 + 0]; \
        a10h = (state)->u.narrow[2 * 1 + 1]; \
        a20l = (state)->u.narrow[2 * 2 + 0]; \
        a20h = (state)->u.narrow[2 * 2 + 1]; \
        a30l = (state)->u.narrow[2 * 3 + 0]; \
        a30h = (state)->u.narrow[2 * 3 + 1]; \
        a40l = (state)->u.narrow[2 * 4 + 0]; \
        a40h = (state)->u.narrow[2 * 4 + 1]; \
        a01l = (state)->u.narrow[2 * 5 + 0]; \
        a01h = (state)->u.narrow[2 * 5 + 1]; \
        a11l = (state)->u.narrow[2 * 6 + 0]; \
        a11h = (state)->u.narrow[2 * 6 + 1]; \
        a21l = (state)->u.narrow[2 * 7 + 0]; \
        a21h = (state)->u.narrow[2 * 7 + 1]; \
        a31l = (state)->u.narrow[2 * 8 + 0]; \
        a31h = (state)->u.narrow[2 * 8 + 1]; \
        a41l = (state)->u.narrow[2 * 9 + 0]; \
        a41h = (state)->u.narrow[2 * 9 + 1]; \
        a02l = (state)->u.narrow[2 * 10 + 0]; \
        a02h = (state)->u.narrow[2 * 10 + 1]; \
        a12l = (state)->u.narrow[2 * 11 + 0]; \
        a12h = (state)->u.narrow[2 * 11 + 1]; \
        a22l = (state)->u.narrow[2 * 12 + 0]; \
        a22h = (state)->u.narrow[2 * 12 + 1]; \
        a32l = (state)->u.narrow[2 * 13 + 0]; \
        a32h = (state)->u.narrow[2 * 13 + 1]; \
        a42l = (state)->u.narrow[2 * 14 + 0]; \
        a42h = (state)->u.narrow[2 * 14 + 1]; \
        a03l = (state)->u.narrow[2 * 15 + 0]; \
        a03h = (state)->u.narrow[2 * 15 + 1]; \
        a13l = (state)->u.narrow[2 * 16 + 0]; \
        a13h = (state)->u.narrow[2 * 16 + 1]; \
        a23l = (state)->u.narrow[2 * 17 + 0]; \
        a23h = (state)->u.narrow[2 * 17 + 1]; \
        a33l = (state)->u.narrow[2 * 18 + 0]; \
        a33h = (state)->u.narrow[2 * 18 + 1]; \
        a43l = (state)->u.narrow[2 * 19 + 0]; \
        a43h = (state)->u.narrow[2 * 19 + 1]; \
        a04l = (state)->u.narrow[2 * 20 + 0]; \
        a04h = (state)->u.narrow[2 * 20 + 1]; \
        a14l = (state)->u.narrow[2 * 21 + 0]; \
        a14h = (state)->u.narrow[2 * 21 + 1]; \
        a24l = (state)->u.narrow[2 * 22 + 0]; \
        a24h = (state)->u.narrow[2 * 22 + 1]; \
        a34l = (state)->u.narrow[2 * 23 + 0]; \
        a34h = (state)->u.narrow[2 * 23 + 1]; \
        a44l = (state)->u.narrow[2 * 24 + 0]; \
        a44h = (state)->u.narrow[2 * 24 + 1]; \
    } while (0)

#define WRITE_STATE(state)   do { \
        (state)->u.narrow[2 * 0 + 0] = a00l; \
        (state)->u.narrow[2 * 0 + 1] = a00h; \
        (state)->u.narrow[2 * 1 + 0] = a10l; \
        (state)->u.narrow[2 * 1 + 1] = a10h; \
        (state)->u.narrow[2 * 2 + 0] = a20l; \
        (state)->u.narrow[2 * 2 + 1] = a20h; \
        (state)->u.narrow[2 * 3 + 0] = a30l; \
        (state)->u.narrow[2 * 3 + 1] = a30h; \
        (state)->u.narrow[2 * 4 + 0] = a40l; \
        (state)->u.narrow[2 * 4 + 1] = a40h; \
        (state)->u.narrow[2 * 5 + 0] = a01l; \
        (state)->u.narrow[2 * 5 + 1] = a01h; \
        (state)->u.narrow[2 * 6 + 0] = a11l; \
        (state)->u.narrow[2 * 6 + 1] = a11h; \
        (state)->u.narrow[2 * 7 + 0] = a21l; \
        (state)->u.narrow[2 * 7 + 1] = a21h; \
        (state)->u.narrow[2 * 8 + 0] = a31l; \
        (state)->u.narrow[2 * 8 + 1] = a31h; \
        (state)->u.narrow[2 * 9 + 0] = a41l; \
        (state)->u.narrow[2 * 9 + 1] = a41h; \
        (state)->u.narrow[2 * 10 + 0] = a02l; \
        (state)->u.narrow[2 * 10 + 1] = a02h; \
        (state)->u.narrow[2 * 11 + 0] = a12l; \
        (state)->u.narrow[2 * 11 + 1] = a12h; \
        (state)->u.narrow[2 * 12 + 0] = a22l; \
        (state)->u.narrow[2 * 12 + 1] = a22h; \
        (state)->u.narrow[2 * 13 + 0] = a32l; \
        (state)->u.narrow[2 * 13 + 1] = a32h; \
        (state)->u.narrow[2 * 14 + 0] = a42l; \
        (state)->u.narrow[2 * 14 + 1] = a42h; \
        (state)->u.narrow[2 * 15 + 0] = a03l; \
        (state)->u.narrow[2 * 15 + 1] = a03h; \
        (state)->u.narrow[2 * 16 + 0] = a13l; \
        (state)->u.narrow[2 * 16 + 1] = a13h; \
        (state)->u.narrow[2 * 17 + 0] = a23l; \
        (state)->u.narrow[2 * 17 + 1] = a23h; \
        (state)->u.narrow[2 * 18 + 0] = a33l; \
        (state)->u.narrow[2 * 18 + 1] = a33h; \
        (state)->u.narrow[2 * 19 + 0] = a43l; \
        (state)->u.narrow[2 * 19 + 1] = a43h; \
        (state)->u.narrow[2 * 20 + 0] = a04l; \
        (state)->u.narrow[2 * 20 + 1] = a04h; \
        (state)->u.narrow[2 * 21 + 0] = a14l; \
        (state)->u.narrow[2 * 21 + 1] = a14h; \
        (state)->u.narrow[2 * 22 + 0] = a24l; \
        (state)->u.narrow[2 * 22 + 1] = a24h; \
        (state)->u.narrow[2 * 23 + 0] = a34l; \
        (state)->u.narrow[2 * 23 + 1] = a34h; \
        (state)->u.narrow[2 * 24 + 0] = a44l; \
        (state)->u.narrow[2 * 24 + 1] = a44h; \
    } while (0)
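
/*
 * READ64 (below) reads one 64-bit lane of input as two little-endian
 * 32-bit words, interleaves them if applicable, and XORs them into the
 * named lane halves.
 */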
#define READ64(d, off)   do { \
        sph_u32 tl, th; \
        tl = sph_dec32le_aligned(buf + (off)); \
        th = sph_dec32le_aligned(buf + (off) + 4); \
        INTERLEAVE(tl, th); \
        d ## l ^= tl; \
        d ## h ^= th; \
    } while (0)

#define INPUT_BUF144   do { \
        READ64(a00,   0); \
        READ64(a10,   8); \
        READ64(a20,  16); \
        READ64(a30,  24); \
        READ64(a40,  32); \
        READ64(a01,  40); \
        READ64(a11,  48); \
        READ64(a21,  56); \
        READ64(a31,  64); \
        READ64(a41,  72); \
        READ64(a02,  80); \
        READ64(a12,  88); \
        READ64(a22,  96); \
        READ64(a32, 104); \
        READ64(a42, 112); \
        READ64(a03, 120); \
        READ64(a13, 128); \
        READ64(a23, 136); \
    } while (0)

#define INPUT_BUF136   do { \
        READ64(a00,   0); \
        READ64(a10,   8); \
        READ64(a20,  16); \
        READ64(a30,  24); \
        READ64(a40,  32); \
        READ64(a01,  40); \
        READ64(a11,  48); \
        READ64(a21,  56); \
        READ64(a31,  64); \
        READ64(a41,  72); \
        READ64(a02,  80); \
        READ64(a12,  88); \
        READ64(a22,  96); \
        READ64(a32, 104); \
        READ64(a42, 112); \
        READ64(a03, 120); \
        READ64(a13, 128); \
    } while (0)

#define INPUT_BUF104   do { \
        READ64(a00,   0); \
        READ64(a10,   8); \
        READ64(a20,  16); \
        READ64(a30,  24); \
        READ64(a40,  32); \
        READ64(a01,  40); \
        READ64(a11,  48); \
        READ64(a21,  56); \
        READ64(a31,  64); \
        READ64(a41,  72); \
        READ64(a02,  80); \
        READ64(a12,  88); \
        READ64(a22,  96); \
    } while (0)

#define INPUT_BUF72   do { \
        READ64(a00,   0); \
        READ64(a10,   8); \
        READ64(a20,  16); \
        READ64(a30,  24); \
        READ64(a40,  32); \
        READ64(a01,  40); \
        READ64(a11,  48); \
        READ64(a21,  56); \
        READ64(a31,  64); \
    } while (0)

#define INPUT_BUF(lim)   do { \
        READ64(a00,   0); \
        READ64(a10,   8); \
        READ64(a20,  16); \
        READ64(a30,  24); \
        READ64(a40,  32); \
        READ64(a01,  40); \
        READ64(a11,  48); \
        READ64(a21,  56); \
        READ64(a31,  64); \
        if ((lim) == 72) \
            break; \
        READ64(a41,  72); \
        READ64(a02,  80); \
        READ64(a12,  88); \
        READ64(a22,  96); \
        if ((lim) == 104) \
            break; \
        READ64(a32, 104); \
        READ64(a42, 112); \
        READ64(a03, 120); \
        READ64(a13, 128); \
        if ((lim) == 136) \
            break; \
        READ64(a23, 136); \
    } while (0)

#endif

#define DECL64(x)        sph_u32 x ## l, x ## h
#define MOV64(d, s)      (d ## l = s ## l, d ## h = s ## h)
#define XOR64(d, a, b)   (d ## l = a ## l ^ b ## l, d ## h = a ## h ^ b ## h)
#define AND64(d, a, b)   (d ## l = a ## l & b ## l, d ## h = a ## h & b ## h)
#define OR64(d, a, b)    (d ## l = a ## l | b ## l, d ## h = a ## h | b ## h)
#define NOT64(d, s)      (d ## l = SPH_T32(~s ## l), d ## h = SPH_T32(~s ## h))
#define ROL64(d, v, n)   ROL64_ ## n(d, v)
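
/*
 * ROL64(d, v, n) dispatches on the constant rotation count n. In the
 * interleaved representation, rotating a lane by an even count 2k
 * rotates both 32-bit halves by k (ROL64_even); an odd count also
 * swaps the two halves (ROL64_odd*). Without interleaving, counts
 * below 32 are handled by ROL64_small, and larger counts by rotating
 * by n - 32 and swapping the halves (ROL64_big).
 */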
#if SPH_KECCAK_INTERLEAVE

#define ROL64_odd1(d, v)   do { \
        sph_u32 tmp; \
        tmp = v ## l; \
        d ## l = SPH_T32(v ## h << 1) | (v ## h >> 31); \
        d ## h = tmp; \
    } while (0)

#define ROL64_odd63(d, v)   do { \
        sph_u32 tmp; \
        tmp = SPH_T32(v ## l << 31) | (v ## l >> 1); \
        d ## l = v ## h; \
        d ## h = tmp; \
    } while (0)

#define ROL64_odd(d, v, n)   do { \
        sph_u32 tmp; \
        tmp = SPH_T32(v ## l << (n - 1)) | (v ## l >> (33 - n)); \
        d ## l = SPH_T32(v ## h << n) | (v ## h >> (32 - n)); \
        d ## h = tmp; \
    } while (0)

#define ROL64_even(d, v, n)   do { \
        d ## l = SPH_T32(v ## l << n) | (v ## l >> (32 - n)); \
        d ## h = SPH_T32(v ## h << n) | (v ## h >> (32 - n)); \
    } while (0)

#define ROL64_0(d, v)
#define ROL64_1(d, v)    ROL64_odd1(d, v)
#define ROL64_2(d, v)    ROL64_even(d, v,  1)
#define ROL64_3(d, v)    ROL64_odd( d, v,  2)
#define ROL64_4(d, v)    ROL64_even(d, v,  2)
#define ROL64_5(d, v)    ROL64_odd( d, v,  3)
#define ROL64_6(d, v)    ROL64_even(d, v,  3)
#define ROL64_7(d, v)    ROL64_odd( d, v,  4)
#define ROL64_8(d, v)    ROL64_even(d, v,  4)
#define ROL64_9(d, v)    ROL64_odd( d, v,  5)
#define ROL64_10(d, v)   ROL64_even(d, v,  5)
#define ROL64_11(d, v)   ROL64_odd( d, v,  6)
#define ROL64_12(d, v)   ROL64_even(d, v,  6)
#define ROL64_13(d, v)   ROL64_odd( d, v,  7)
#define ROL64_14(d, v)   ROL64_even(d, v,  7)
#define ROL64_15(d, v)   ROL64_odd( d, v,  8)
#define ROL64_16(d, v)   ROL64_even(d, v,  8)
#define ROL64_17(d, v)   ROL64_odd( d, v,  9)
#define ROL64_18(d, v)   ROL64_even(d, v,  9)
#define ROL64_19(d, v)   ROL64_odd( d, v, 10)
#define ROL64_20(d, v)   ROL64_even(d, v, 10)
#define ROL64_21(d, v)   ROL64_odd( d, v, 11)
#define ROL64_22(d, v)   ROL64_even(d, v, 11)
#define ROL64_23(d, v)   ROL64_odd( d, v, 12)
#define ROL64_24(d, v)   ROL64_even(d, v, 12)
#define ROL64_25(d, v)   ROL64_odd( d, v, 13)
#define ROL64_26(d, v)   ROL64_even(d, v, 13)
#define ROL64_27(d, v)   ROL64_odd( d, v, 14)
#define ROL64_28(d, v)   ROL64_even(d, v, 14)
#define ROL64_29(d, v)   ROL64_odd( d, v, 15)
#define ROL64_30(d, v)   ROL64_even(d, v, 15)
#define ROL64_31(d, v)   ROL64_odd( d, v, 16)
#define ROL64_32(d, v)   ROL64_even(d, v, 16)
#define ROL64_33(d, v)   ROL64_odd( d, v, 17)
#define ROL64_34(d, v)   ROL64_even(d, v, 17)
#define ROL64_35(d, v)   ROL64_odd( d, v, 18)
#define ROL64_36(d, v)   ROL64_even(d, v, 18)
#define ROL64_37(d, v)   ROL64_odd( d, v, 19)
#define ROL64_38(d, v)   ROL64_even(d, v, 19)
#define ROL64_39(d, v)   ROL64_odd( d, v, 20)
#define ROL64_40(d, v)   ROL64_even(d, v, 20)
#define ROL64_41(d, v)   ROL64_odd( d, v, 21)
#define ROL64_42(d, v)   ROL64_even(d, v, 21)
#define ROL64_43(d, v)   ROL64_odd( d, v, 22)
#define ROL64_44(d, v)   ROL64_even(d, v, 22)
#define ROL64_45(d, v)   ROL64_odd( d, v, 23)
#define ROL64_46(d, v)   ROL64_even(d, v, 23)
#define ROL64_47(d, v)   ROL64_odd( d, v, 24)
#define ROL64_48(d, v)   ROL64_even(d, v, 24)
#define ROL64_49(d, v)   ROL64_odd( d, v, 25)
#define ROL64_50(d, v)   ROL64_even(d, v, 25)
#define ROL64_51(d, v)   ROL64_odd( d, v, 26)
#define ROL64_52(d, v)   ROL64_even(d, v, 26)
#define ROL64_53(d, v)   ROL64_odd( d, v, 27)
#define ROL64_54(d, v)   ROL64_even(d, v, 27)
#define ROL64_55(d, v)   ROL64_odd( d, v, 28)
#define ROL64_56(d, v)   ROL64_even(d, v, 28)
#define ROL64_57(d, v)   ROL64_odd( d, v, 29)
#define ROL64_58(d, v)   ROL64_even(d, v, 29)
#define ROL64_59(d, v)   ROL64_odd( d, v, 30)
#define ROL64_60(d, v)   ROL64_even(d, v, 30)
#define ROL64_61(d, v)   ROL64_odd( d, v, 31)
#define ROL64_62(d, v)   ROL64_even(d, v, 31)
#define ROL64_63(d, v)   ROL64_odd63(d, v)

#else

#define ROL64_small(d, v, n)   do { \
        sph_u32 tmp; \
        tmp = SPH_T32(v ## l << n) | (v ## h >> (32 - n)); \
        d ## h = SPH_T32(v ## h << n) | (v ## l >> (32 - n)); \
        d ## l = tmp; \
    } while (0)

#define ROL64_0(d, v)   0
#define ROL64_1(d, v)    ROL64_small(d, v,  1)
#define ROL64_2(d, v)    ROL64_small(d, v,  2)
#define ROL64_3(d, v)    ROL64_small(d, v,  3)
#define ROL64_4(d, v)    ROL64_small(d, v,  4)
#define ROL64_5(d, v)    ROL64_small(d, v,  5)
#define ROL64_6(d, v)    ROL64_small(d, v,  6)
#define ROL64_7(d, v)    ROL64_small(d, v,  7)
#define ROL64_8(d, v)    ROL64_small(d, v,  8)
#define ROL64_9(d, v)    ROL64_small(d, v,  9)
#define ROL64_10(d, v)   ROL64_small(d, v, 10)
#define ROL64_11(d, v)   ROL64_small(d, v, 11)
#define ROL64_12(d, v)   ROL64_small(d, v, 12)
#define ROL64_13(d, v)   ROL64_small(d, v, 13)
#define ROL64_14(d, v)   ROL64_small(d, v, 14)
#define ROL64_15(d, v)   ROL64_small(d, v, 15)
#define ROL64_16(d, v)   ROL64_small(d, v, 16)
#define ROL64_17(d, v)   ROL64_small(d, v, 17)
#define ROL64_18(d, v)   ROL64_small(d, v, 18)
#define ROL64_19(d, v)   ROL64_small(d, v, 19)
#define ROL64_20(d, v)   ROL64_small(d, v, 20)
#define ROL64_21(d, v)   ROL64_small(d, v, 21)
#define ROL64_22(d, v)   ROL64_small(d, v, 22)
#define ROL64_23(d, v)   ROL64_small(d, v, 23)
#define ROL64_24(d, v)   ROL64_small(d, v, 24)
#define ROL64_25(d, v)   ROL64_small(d, v, 25)
#define ROL64_26(d, v)   ROL64_small(d, v, 26)
#define ROL64_27(d, v)   ROL64_small(d, v, 27)
#define ROL64_28(d, v)   ROL64_small(d, v, 28)
#define ROL64_29(d, v)   ROL64_small(d, v, 29)
#define ROL64_30(d, v)   ROL64_small(d, v, 30)
#define ROL64_31(d, v)   ROL64_small(d, v, 31)

#define ROL64_32(d, v)   do { \
        sph_u32 tmp; \
        tmp = v ## l; \
        d ## l = v ## h; \
        d ## h = tmp; \
    } while (0)

#define ROL64_big(d, v, n)   do { \
        DECL64(tr); \
        ROL64_small(tr, v, n); \
        d ## h = tr ## l; \
        d ## l = tr ## h; \
    } while (0)

#define ROL64_33(d, v)   ROL64_big(d, v,  1)
#define ROL64_34(d, v)   ROL64_big(d, v,  2)
#define ROL64_35(d, v)   ROL64_big(d, v,  3)
#define ROL64_36(d, v)   ROL64_big(d, v,  4)
#define ROL64_37(d, v)   ROL64_big(d, v,  5)
#define ROL64_38(d, v)   ROL64_big(d, v,  6)
#define ROL64_39(d, v)   ROL64_big(d, v,  7)
#define ROL64_40(d, v)   ROL64_big(d, v,  8)
#define ROL64_41(d, v)   ROL64_big(d, v,  9)
#define ROL64_42(d, v)   ROL64_big(d, v, 10)
#define ROL64_43(d, v)   ROL64_big(d, v, 11)
#define ROL64_44(d, v)   ROL64_big(d, v, 12)
#define ROL64_45(d, v)   ROL64_big(d, v, 13)
#define ROL64_46(d, v)   ROL64_big(d, v, 14)
#define ROL64_47(d, v)   ROL64_big(d, v, 15)
#define ROL64_48(d, v)   ROL64_big(d, v, 16)
#define ROL64_49(d, v)   ROL64_big(d, v, 17)
#define ROL64_50(d, v)   ROL64_big(d, v, 18)
#define ROL64_51(d, v)   ROL64_big(d, v, 19)
#define ROL64_52(d, v)   ROL64_big(d, v, 20)
#define ROL64_53(d, v)   ROL64_big(d, v, 21)
#define ROL64_54(d, v)   ROL64_big(d, v, 22)
#define ROL64_55(d, v)   ROL64_big(d, v, 23)
#define ROL64_56(d, v)   ROL64_big(d, v, 24)
#define ROL64_57(d, v)   ROL64_big(d, v, 25)
#define ROL64_58(d, v)   ROL64_big(d, v, 26)
#define ROL64_59(d, v)   ROL64_big(d, v, 27)
#define ROL64_60(d, v)   ROL64_big(d, v, 28)
#define ROL64_61(d, v)   ROL64_big(d, v, 29)
#define ROL64_62(d, v)   ROL64_big(d, v, 30)
#define ROL64_63(d, v)   ROL64_big(d, v, 31)

#endif

#define XOR64_IOTA(d, s, k) \
    (d ## l = s ## l ^ k.low, d ## h = s ## h ^ k.high)
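
/*
 * Round function: THETA computes the column parities (TH_ELT) and XORs
 * them into the lanes; RHO applies the per-lane rotations; KHI
 * implements the chi step, using the "lane complement" trick (some
 * lanes are kept complemented so that fewer NOT operations are needed
 * per round); IOTA XORs the round constant into lane (0, 0).
 */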
#define TH_ELT(t, c0, c1, c2, c3, c4, d0, d1, d2, d3, d4)   do { \
        DECL64(tt0); \
        DECL64(tt1); \
        DECL64(tt2); \
        DECL64(tt3); \
        XOR64(tt0, d0, d1); \
        XOR64(tt1, d2, d3); \
        XOR64(tt0, tt0, d4); \
        XOR64(tt0, tt0, tt1); \
        ROL64(tt0, tt0, 1); \
        XOR64(tt2, c0, c1); \
        XOR64(tt3, c2, c3); \
        XOR64(tt0, tt0, c4); \
        XOR64(tt2, tt2, tt3); \
        XOR64(t, tt0, tt2); \
    } while (0)

#define THETA(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
    b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
    b40, b41, b42, b43, b44) \
    do { \
        DECL64(t0); \
        DECL64(t1); \
        DECL64(t2); \
        DECL64(t3); \
        DECL64(t4); \
        TH_ELT(t0, b40, b41, b42, b43, b44, b10, b11, b12, b13, b14); \
        TH_ELT(t1, b00, b01, b02, b03, b04, b20, b21, b22, b23, b24); \
        TH_ELT(t2, b10, b11, b12, b13, b14, b30, b31, b32, b33, b34); \
        TH_ELT(t3, b20, b21, b22, b23, b24, b40, b41, b42, b43, b44); \
        TH_ELT(t4, b30, b31, b32, b33, b34, b00, b01, b02, b03, b04); \
        XOR64(b00, b00, t0); \
        XOR64(b01, b01, t0); \
        XOR64(b02, b02, t0); \
        XOR64(b03, b03, t0); \
        XOR64(b04, b04, t0); \
        XOR64(b10, b10, t1); \
        XOR64(b11, b11, t1); \
        XOR64(b12, b12, t1); \
        XOR64(b13, b13, t1); \
        XOR64(b14, b14, t1); \
        XOR64(b20, b20, t2); \
        XOR64(b21, b21, t2); \
        XOR64(b22, b22, t2); \
        XOR64(b23, b23, t2); \
        XOR64(b24, b24, t2); \
        XOR64(b30, b30, t3); \
        XOR64(b31, b31, t3); \
        XOR64(b32, b32, t3); \
        XOR64(b33, b33, t3); \
        XOR64(b34, b34, t3); \
        XOR64(b40, b40, t4); \
        XOR64(b41, b41, t4); \
        XOR64(b42, b42, t4); \
        XOR64(b43, b43, t4); \
        XOR64(b44, b44, t4); \
    } while (0)

#define RHO(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
    b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
    b40, b41, b42, b43, b44) \
    do { \
        ROL64(b01, b01, 36); \
        ROL64(b02, b02,  3); \
        ROL64(b03, b03, 41); \
        ROL64(b04, b04, 18); \
        ROL64(b10, b10,  1); \
        ROL64(b11, b11, 44); \
        ROL64(b12, b12, 10); \
        ROL64(b13, b13, 45); \
        ROL64(b14, b14,  2); \
        ROL64(b20, b20, 62); \
        ROL64(b21, b21,  6); \
        ROL64(b22, b22, 43); \
        ROL64(b23, b23, 15); \
        ROL64(b24, b24, 61); \
        ROL64(b30, b30, 28); \
        ROL64(b31, b31, 55); \
        ROL64(b32, b32, 25); \
        ROL64(b33, b33, 21); \
        ROL64(b34, b34, 56); \
        ROL64(b40, b40, 27); \
        ROL64(b41, b41, 20); \
        ROL64(b42, b42, 39); \
        ROL64(b43, b43,  8); \
        ROL64(b44, b44, 14); \
    } while (0)

#define KHI_XO(d, a, b, c)   do { \
        DECL64(kt); \
        OR64(kt, b, c); \
        XOR64(d, a, kt); \
    } while (0)

#define KHI_XA(d, a, b, c)   do { \
        DECL64(kt); \
        AND64(kt, b, c); \
        XOR64(d, a, kt); \
    } while (0)

#define KHI(b00, b01, b02, b03, b04, b10, b11, b12, b13, b14, \
    b20, b21, b22, b23, b24, b30, b31, b32, b33, b34, \
    b40, b41, b42, b43, b44) \
    do { \
        DECL64(c0); \
        DECL64(c1); \
        DECL64(c2); \
        DECL64(c3); \
        DECL64(c4); \
        DECL64(bnn); \
        NOT64(bnn, b20); \
        KHI_XO(c0, b00, b10, b20); \
        KHI_XO(c1, b10, bnn, b30); \
        KHI_XA(c2, b20, b30, b40); \
        KHI_XO(c3, b30, b40, b00); \
        KHI_XA(c4, b40, b00, b10); \
        MOV64(b00, c0); \
        MOV64(b10, c1); \
        MOV64(b20, c2); \
        MOV64(b30, c3); \
        MOV64(b40, c4); \
        NOT64(bnn, b41); \
        KHI_XO(c0, b01, b11, b21); \
        KHI_XA(c1, b11, b21, b31); \
        KHI_XO(c2, b21, b31, bnn); \
        KHI_XO(c3, b31, b41, b01); \
        KHI_XA(c4, b41, b01, b11); \
        MOV64(b01, c0); \
        MOV64(b11, c1); \
        MOV64(b21, c2); \
        MOV64(b31, c3); \
        MOV64(b41, c4); \
        NOT64(bnn, b32); \
        KHI_XO(c0, b02, b12, b22); \
        KHI_XA(c1, b12, b22, b32); \
        KHI_XA(c2, b22, bnn, b42); \
        KHI_XO(c3, bnn, b42, b02); \
        KHI_XA(c4, b42, b02, b12); \
        MOV64(b02, c0); \
        MOV64(b12, c1); \
        MOV64(b22, c2); \
        MOV64(b32, c3); \
        MOV64(b42, c4); \
        NOT64(bnn, b33); \
        KHI_XA(c0, b03, b13, b23); \
        KHI_XO(c1, b13, b23, b33); \
        KHI_XO(c2, b23, bnn, b43); \
        KHI_XA(c3, bnn, b43, b03); \
        KHI_XO(c4, b43, b03, b13); \
        MOV64(b03, c0); \
        MOV64(b13, c1); \
        MOV64(b23, c2); \
        MOV64(b33, c3); \
        MOV64(b43, c4); \
        NOT64(bnn, b14); \
        KHI_XA(c0, b04, bnn, b24); \
        KHI_XO(c1, bnn, b24, b34); \
        KHI_XA(c2, b24, b34, b44); \
        KHI_XO(c3, b34, b44, b04); \
        KHI_XA(c4, b44, b04, b14); \
        MOV64(b04, c0); \
        MOV64(b14, c1); \
        MOV64(b24, c2); \
        MOV64(b34, c3); \
        MOV64(b44, c4); \
    } while (0)

#define IOTA(r)   XOR64_IOTA(a00, a00, r)

#define P0    a00, a01, a02, a03, a04, a10, a11, a12, a13, a14, a20, a21, \
              a22, a23, a24, a30, a31, a32, a33, a34, a40, a41, a42, a43, a44
#define P1    a00, a30, a10, a40, a20, a11, a41, a21, a01, a31, a22, a02, \
              a32, a12, a42, a33, a13, a43, a23, a03, a44, a24, a04, a34, a14
#define P2    a00, a33, a11, a44, a22, a41, a24, a02, a30, a13, a32, a10, \
              a43, a21, a04, a23, a01, a34, a12, a40, a14, a42, a20, a03, a31
#define P3    a00, a23, a41, a14, a32, a24, a42, a10, a33, a01, a43, a11, \
              a34, a02, a20, a12, a30, a03, a21, a44, a31, a04, a22, a40, a13
#define P4    a00, a12, a24, a31, a43, a42, a04, a11, a23, a30, a34, a41, \
              a03, a10, a22, a21, a33, a40, a02, a14, a13, a20, a32, a44, a01
#define P5    a00, a21, a42, a13, a34, a04, a20, a41, a12, a33, a03, a24, \
              a40, a11, a32, a02, a23, a44, a10, a31, a01, a22, a43, a14, a30
#define P6    a00, a02, a04, a01, a03, a20, a22, a24, a21, a23, a40, a42, \
              a44, a41, a43, a10, a12, a14, a11, a13, a30, a32, a34, a31, a33
#define P7    a00, a10, a20, a30, a40, a22, a32, a42, a02, a12, a44, a04, \
              a14, a24, a34, a11, a21, a31, a41, a01, a33, a43, a03, a13, a23
#define P8    a00, a11, a22, a33, a44, a32, a43, a04, a10, a21, a14, a20, \
              a31, a42, a03, a41, a02, a13, a24, a30, a23, a34, a40, a01, a12
#define P9    a00, a41, a32, a23, a14, a43, a34, a20, a11, a02, a31, a22, \
              a13, a04, a40, a24, a10, a01, a42, a33, a12, a03, a44, a30, a21
#define P10   a00, a24, a43, a12, a31, a34, a03, a22, a41, a10, a13, a32, \
              a01, a20, a44, a42, a11, a30, a04, a23, a21, a40, a14, a33, a02
#define P11   a00, a42, a34, a21, a13, a03, a40, a32, a24, a11, a01, a43, \
              a30, a22, a14, a04, a41, a33, a20, a12, a02, a44, a31, a23, a10
#define P12   a00, a04, a03, a02, a01, a40, a44, a43, a42, a41, a30, a34, \
              a33, a32, a31, a20, a24, a23, a22, a21, a10, a14, a13, a12, a11
#define P13   a00, a20, a40, a10, a30, a44, a14, a34, a04, a24, a33, a03, \
              a23, a43, a13, a22, a42, a12, a32, a02, a11, a31, a01, a21, a41
#define P14   a00, a22, a44, a11, a33, a14, a31, a03, a20, a42, a23, a40, \
              a12, a34, a01, a32, a04, a21, a43, a10, a41, a13, a30, a02, a24
#define P15   a00, a32, a14, a41, a23, a31, a13, a40, a22, a04, a12, a44, \
              a21, a03, a30, a43, a20, a02, a34, a11, a24, a01, a33, a10, a42
#define P16   a00, a43, a31, a24, a12, a13, a01, a44, a32, a20, a21, a14, \
              a02, a40, a33, a34, a22, a10, a03, a41, a42, a30, a23, a11, a04
#define P17   a00, a34, a13, a42, a21, a01, a30, a14, a43, a22, a02, a31, \
              a10, a44, a23, a03, a32, a11, a40, a24, a04, a33, a12, a41, a20
#define P18   a00, a03, a01, a04, a02, a30, a33, a31, a34, a32, a10, a13, \
              a11, a14, a12, a40, a43, a41, a44, a42, a20, a23, a21, a24, a22
#define P19   a00, a40, a30, a20, a10, a33, a23, a13, a03, a43, a11, a01, \
              a41, a31, a21, a44, a34, a24, a14, a04, a22, a12, a02, a42, a32
#define P20   a00, a44, a33, a22, a11, a23, a12, a01, a40, a34, a41, a30, \
              a24, a13, a02, a14, a03, a42, a31, a20, a32, a21, a10, a04, a43
#define P21   a00, a14, a23, a32, a41, a12, a21, a30, a44, a03, a24, a33, \
              a42, a01, a10, a31, a40, a04, a13, a22, a43, a02, a11, a20, a34
#define P22   a00, a31, a12, a43, a24, a21, a02, a33, a14, a40, a42, a23, \
              a04, a30, a11, a13, a44, a20, a01, a32, a34, a10, a41, a22, a03
#define P23   a00, a13, a21, a34, a42, a02, a10, a23, a31, a44, a04, a12, \
              a20, a33, a41, a01, a14, a22, a30, a43, a03, a11, a24, a32, a40

#define P1_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a30); \
        MOV64(a30, a33); \
        MOV64(a33, a23); \
        MOV64(a23, a12); \
        MOV64(a12, a21); \
        MOV64(a21, a02); \
        MOV64(a02, a10); \
        MOV64(a10, a11); \
        MOV64(a11, a41); \
        MOV64(a41, a24); \
        MOV64(a24, a42); \
        MOV64(a42, a04); \
        MOV64(a04, a20); \
        MOV64(a20, a22); \
        MOV64(a22, a32); \
        MOV64(a32, a43); \
        MOV64(a43, a34); \
        MOV64(a34, a03); \
        MOV64(a03, a40); \
        MOV64(a40, a44); \
        MOV64(a44, a14); \
        MOV64(a14, a31); \
        MOV64(a31, a13); \
        MOV64(a13, t); \
    } while (0)

#define P2_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a33); \
        MOV64(a33, a12); \
        MOV64(a12, a02); \
        MOV64(a02, a11); \
        MOV64(a11, a24); \
        MOV64(a24, a04); \
        MOV64(a04, a22); \
        MOV64(a22, a43); \
        MOV64(a43, a03); \
        MOV64(a03, a44); \
        MOV64(a44, a31); \
        MOV64(a31, t); \
        MOV64(t, a10); \
        MOV64(a10, a41); \
        MOV64(a41, a42); \
        MOV64(a42, a20); \
        MOV64(a20, a32); \
        MOV64(a32, a34); \
        MOV64(a34, a40); \
        MOV64(a40, a14); \
        MOV64(a14, a13); \
        MOV64(a13, a30); \
        MOV64(a30, a23); \
        MOV64(a23, a21); \
        MOV64(a21, t); \
    } while (0)

#define P4_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a12); \
        MOV64(a12, a11); \
        MOV64(a11, a04); \
        MOV64(a04, a43); \
        MOV64(a43, a44); \
        MOV64(a44, t); \
        MOV64(t, a02); \
        MOV64(a02, a24); \
        MOV64(a24, a22); \
        MOV64(a22, a03); \
        MOV64(a03, a31); \
        MOV64(a31, a33); \
        MOV64(a33, t); \
        MOV64(t, a10); \
        MOV64(a10, a42); \
        MOV64(a42, a32); \
        MOV64(a32, a40); \
        MOV64(a40, a13); \
        MOV64(a13, a23); \
        MOV64(a23, t); \
        MOV64(t, a14); \
        MOV64(a14, a30); \
        MOV64(a30, a21); \
        MOV64(a21, a41); \
        MOV64(a41, a20); \
        MOV64(a20, a34); \
        MOV64(a34, t); \
    } while (0)

#define P6_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a02); \
        MOV64(a02, a04); \
        MOV64(a04, a03); \
        MOV64(a03, t); \
        MOV64(t, a10); \
        MOV64(a10, a20); \
        MOV64(a20, a40); \
        MOV64(a40, a30); \
        MOV64(a30, t); \
        MOV64(t, a11); \
        MOV64(a11, a22); \
        MOV64(a22, a44); \
        MOV64(a44, a33); \
        MOV64(a33, t); \
        MOV64(t, a12); \
        MOV64(a12, a24); \
        MOV64(a24, a43); \
        MOV64(a43, a31); \
        MOV64(a31, t); \
        MOV64(t, a13); \
        MOV64(a13, a21); \
        MOV64(a21, a42); \
        MOV64(a42, a34); \
        MOV64(a34, t); \
        MOV64(t, a14); \
        MOV64(a14, a23); \
        MOV64(a23, a41); \
        MOV64(a41, a32); \
        MOV64(a32, t); \
    } while (0)

#define P8_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a11); \
        MOV64(a11, a43); \
        MOV64(a43, t); \
        MOV64(t, a02); \
        MOV64(a02, a22); \
        MOV64(a22, a31); \
        MOV64(a31, t); \
        MOV64(t, a03); \
        MOV64(a03, a33); \
        MOV64(a33, a24); \
        MOV64(a24, t); \
        MOV64(t, a04); \
        MOV64(a04, a44); \
        MOV64(a44, a12); \
        MOV64(a12, t); \
        MOV64(t, a10); \
        MOV64(a10, a32); \
        MOV64(a32, a13); \
        MOV64(a13, t); \
        MOV64(t, a14); \
        MOV64(a14, a21); \
        MOV64(a21, a20); \
        MOV64(a20, t); \
        MOV64(t, a23); \
        MOV64(a23, a42); \
        MOV64(a42, a40); \
        MOV64(a40, t); \
        MOV64(t, a30); \
        MOV64(a30, a41); \
        MOV64(a41, a34); \
        MOV64(a34, t); \
    } while (0)

#define P12_TO_P0   do { \
        DECL64(t); \
        MOV64(t, a01); \
        MOV64(a01, a04); \
        MOV64(a04, t); \
        MOV64(t, a02); \
        MOV64(a02, a03); \
        MOV64(a03, t); \
        MOV64(t, a10); \
        MOV64(a10, a40); \
        MOV64(a40, t); \
        MOV64(t, a11); \
        MOV64(a11, a44); \
        MOV64(a44, t); \
        MOV64(t, a12); \
        MOV64(a12, a43); \
        MOV64(a43, t); \
        MOV64(t, a13); \
        MOV64(a13, a42); \
        MOV64(a42, t); \
        MOV64(t, a14); \
        MOV64(a14, a41); \
        MOV64(a41, t); \
        MOV64(t, a20); \
        MOV64(a20, a30); \
        MOV64(a30, t); \
        MOV64(t, a21); \
        MOV64(a21, a34); \
        MOV64(a34, t); \
        MOV64(t, a22); \
        MOV64(a22, a33); \
        MOV64(a33, t); \
        MOV64(t, a23); \
        MOV64(a23, a32); \
        MOV64(a32, t); \
        MOV64(t, a24); \
        MOV64(a24, a31); \
        MOV64(a31, t); \
    } while (0)

#define LPAR   (
#define RPAR   )

#define KF_ELT(r, s, k)   do { \
        THETA LPAR P ## r RPAR; \
        RHO LPAR P ## r RPAR; \
        KHI LPAR P ## s RPAR; \
        IOTA(k); \
    } while (0)
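
/*
 * The P0..P23 macros above list the state variables as permuted by
 * successive applications of pi, so that rounds can be chained by
 * renaming instead of moving data; the P*_TO_P0 macros move the values
 * back to the P0 naming when the round loop wraps around. The loop
 * below is unrolled according to SPH_KECCAK_UNROLL (0 means fully
 * unrolled, in which case no renaming fix-up is needed since pi has
 * order 24).
 */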
#define DO(x)   x

#define KECCAK_F_1600   DO(KECCAK_F_1600_)

#if SPH_KECCAK_UNROLL == 1

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j ++) { \
            KF_ELT( 0,  1, RC[j + 0]); \
            P1_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 2

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j += 2) { \
            KF_ELT( 0,  1, RC[j + 0]); \
            KF_ELT( 1,  2, RC[j + 1]); \
            P2_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 4

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j += 4) { \
            KF_ELT( 0,  1, RC[j + 0]); \
            KF_ELT( 1,  2, RC[j + 1]); \
            KF_ELT( 2,  3, RC[j + 2]); \
            KF_ELT( 3,  4, RC[j + 3]); \
            P4_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 6

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j += 6) { \
            KF_ELT( 0,  1, RC[j + 0]); \
            KF_ELT( 1,  2, RC[j + 1]); \
            KF_ELT( 2,  3, RC[j + 2]); \
            KF_ELT( 3,  4, RC[j + 3]); \
            KF_ELT( 4,  5, RC[j + 4]); \
            KF_ELT( 5,  6, RC[j + 5]); \
            P6_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 8

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j += 8) { \
            KF_ELT( 0,  1, RC[j + 0]); \
            KF_ELT( 1,  2, RC[j + 1]); \
            KF_ELT( 2,  3, RC[j + 2]); \
            KF_ELT( 3,  4, RC[j + 3]); \
            KF_ELT( 4,  5, RC[j + 4]); \
            KF_ELT( 5,  6, RC[j + 5]); \
            KF_ELT( 6,  7, RC[j + 6]); \
            KF_ELT( 7,  8, RC[j + 7]); \
            P8_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 12

#define KECCAK_F_1600_   do { \
        int j; \
        for (j = 0; j < 24; j += 12) { \
            KF_ELT( 0,  1, RC[j +  0]); \
            KF_ELT( 1,  2, RC[j +  1]); \
            KF_ELT( 2,  3, RC[j +  2]); \
            KF_ELT( 3,  4, RC[j +  3]); \
            KF_ELT( 4,  5, RC[j +  4]); \
            KF_ELT( 5,  6, RC[j +  5]); \
            KF_ELT( 6,  7, RC[j +  6]); \
            KF_ELT( 7,  8, RC[j +  7]); \
            KF_ELT( 8,  9, RC[j +  8]); \
            KF_ELT( 9, 10, RC[j +  9]); \
            KF_ELT(10, 11, RC[j + 10]); \
            KF_ELT(11, 12, RC[j + 11]); \
            P12_TO_P0; \
        } \
    } while (0)

#elif SPH_KECCAK_UNROLL == 0

#define KECCAK_F_1600_   do { \
        KF_ELT( 0,  1, RC[ 0]); \
        KF_ELT( 1,  2, RC[ 1]); \
        KF_ELT( 2,  3, RC[ 2]); \
        KF_ELT( 3,  4, RC[ 3]); \
        KF_ELT( 4,  5, RC[ 4]); \
        KF_ELT( 5,  6, RC[ 5]); \
        KF_ELT( 6,  7, RC[ 6]); \
        KF_ELT( 7,  8, RC[ 7]); \
        KF_ELT( 8,  9, RC[ 8]); \
        KF_ELT( 9, 10, RC[ 9]); \
        KF_ELT(10, 11, RC[10]); \
        KF_ELT(11, 12, RC[11]); \
        KF_ELT(12, 13, RC[12]); \
        KF_ELT(13, 14, RC[13]); \
        KF_ELT(14, 15, RC[14]); \
        KF_ELT(15, 16, RC[15]); \
        KF_ELT(16, 17, RC[16]); \
        KF_ELT(17, 18, RC[17]); \
        KF_ELT(18, 19, RC[18]); \
        KF_ELT(19, 20, RC[19]); \
        KF_ELT(20, 21, RC[20]); \
        KF_ELT(21, 22, RC[21]); \
        KF_ELT(22, 23, RC[22]); \
        KF_ELT(23,  0, RC[23]); \
    } while (0)

#else

#error Unimplemented unroll count for Keccak.

#endif
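
/*
 * keccak_init zeroes the state and sets to all-ones the six lanes that
 * this implementation keeps complemented (the "lane complement"
 * optimization); they are inverted back when the digest is extracted.
 * The output size is given in bits; the block size ("rate") is
 * 200 - 2 * (out_size / 8) bytes, i.e. 200 - (out_size >> 2).
 */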
static void
keccak_init(sph_keccak_context *kc, unsigned out_size)
{
    int i;

#if SPH_KECCAK_64
    for (i = 0; i < 25; i ++)
        kc->u.wide[i] = 0;
    kc->u.wide[ 1] = SPH_C64(0xFFFFFFFFFFFFFFFF);
    kc->u.wide[ 2] = SPH_C64(0xFFFFFFFFFFFFFFFF);
    kc->u.wide[ 8] = SPH_C64(0xFFFFFFFFFFFFFFFF);
    kc->u.wide[12] = SPH_C64(0xFFFFFFFFFFFFFFFF);
    kc->u.wide[17] = SPH_C64(0xFFFFFFFFFFFFFFFF);
    kc->u.wide[20] = SPH_C64(0xFFFFFFFFFFFFFFFF);
#else
    for (i = 0; i < 50; i ++)
        kc->u.narrow[i] = 0;
    kc->u.narrow[ 2] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[ 3] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[ 4] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[ 5] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[16] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[17] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[24] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[25] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[34] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[35] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[40] = SPH_C32(0xFFFFFFFF);
    kc->u.narrow[41] = SPH_C32(0xFFFFFFFF);
#endif
    kc->ptr = 0;
    kc->lim = 200 - (out_size >> 2);
}
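
/*
 * keccak_core buffers input bytes and, each time lim bytes have
 * accumulated, XORs the block into the state and applies the
 * Keccak-f[1600] permutation.
 */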
static void
keccak_core(sph_keccak_context *kc, const void *data, size_t len, size_t lim)
{
    unsigned char *buf;
    size_t ptr;
    DECL_STATE

    buf = kc->buf;
    ptr = kc->ptr;
    if (len < (lim - ptr)) {
        memcpy(buf + ptr, data, len);
        kc->ptr = ptr + len;
        return;
    }
    READ_STATE(kc);
    while (len > 0) {
        size_t clen;

        clen = (lim - ptr);
        if (clen > len)
            clen = len;
        memcpy(buf + ptr, data, clen);
        ptr += clen;
        data = (const unsigned char *)data + clen;
        len -= clen;
        if (ptr == lim) {
            INPUT_BUF(lim);
            KECCAK_F_1600;
            ptr = 0;
        }
    }
    WRITE_STATE(kc);
    kc->ptr = ptr;
}
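
/*
 * DEFCLOSE defines a close function for a given digest size d (in
 * bytes) and block size lim: it appends the n extra bits, the first
 * padding '1' bit, zero bits up to the last byte of the block, and a
 * final 0x80 byte (the pad10*1 rule of the original Keccak); it then
 * un-complements the inverted lanes, encodes the digest and
 * re-initializes the context.
 */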
#if SPH_KECCAK_64

#define DEFCLOSE(d, lim) \
    static void keccak_close ## d( \
        sph_keccak_context *kc, unsigned ub, unsigned n, void *dst) \
    { \
        unsigned eb; \
        union { \
            unsigned char tmp[lim + 1]; \
            sph_u64 dummy;   /* for alignment */ \
        } u; \
        size_t j; \
        eb = (0x100 | (ub & 0xFF)) >> (8 - n); \
        if (kc->ptr == (lim - 1)) { \
            if (n == 7) { \
                u.tmp[0] = eb; \
                memset(u.tmp + 1, 0, lim - 1); \
                u.tmp[lim] = 0x80; \
                j = 1 + lim; \
            } else { \
                u.tmp[0] = eb | 0x80; \
                j = 1; \
            } \
        } else { \
            j = lim - kc->ptr; \
            u.tmp[0] = eb; \
            memset(u.tmp + 1, 0, j - 2); \
            u.tmp[j - 1] = 0x80; \
        } \
        keccak_core(kc, u.tmp, j, lim); \
        /* Finalize the "lane complement" */ \
        kc->u.wide[ 1] = ~kc->u.wide[ 1]; \
        kc->u.wide[ 2] = ~kc->u.wide[ 2]; \
        kc->u.wide[ 8] = ~kc->u.wide[ 8]; \
        kc->u.wide[12] = ~kc->u.wide[12]; \
        kc->u.wide[17] = ~kc->u.wide[17]; \
        kc->u.wide[20] = ~kc->u.wide[20]; \
        for (j = 0; j < d; j += 8) \
            sph_enc64le_aligned(u.tmp + j, kc->u.wide[j >> 3]); \
        memcpy(dst, u.tmp, d); \
        keccak_init(kc, (unsigned)d << 3); \
    }

#else

#define DEFCLOSE(d, lim) \
    static void keccak_close ## d( \
        sph_keccak_context *kc, unsigned ub, unsigned n, void *dst) \
    { \
        unsigned eb; \
        union { \
            unsigned char tmp[lim + 1]; \
            sph_u32 dummy;   /* for alignment */ \
        } u; \
        size_t j; \
        eb = (0x100 | (ub & 0xFF)) >> (8 - n); \
        if (kc->ptr == (lim - 1)) { \
            if (n == 7) { \
                u.tmp[0] = eb; \
                memset(u.tmp + 1, 0, lim - 1); \
                u.tmp[lim] = 0x80; \
                j = 1 + lim; \
            } else { \
                u.tmp[0] = eb | 0x80; \
                j = 1; \
            } \
        } else { \
            j = lim - kc->ptr; \
            u.tmp[0] = eb; \
            memset(u.tmp + 1, 0, j - 2); \
            u.tmp[j - 1] = 0x80; \
        } \
        keccak_core(kc, u.tmp, j, lim); \
        /* Finalize the "lane complement" */ \
        kc->u.narrow[ 2] = ~kc->u.narrow[ 2]; \
        kc->u.narrow[ 3] = ~kc->u.narrow[ 3]; \
        kc->u.narrow[ 4] = ~kc->u.narrow[ 4]; \
        kc->u.narrow[ 5] = ~kc->u.narrow[ 5]; \
        kc->u.narrow[16] = ~kc->u.narrow[16]; \
        kc->u.narrow[17] = ~kc->u.narrow[17]; \
        kc->u.narrow[24] = ~kc->u.narrow[24]; \
        kc->u.narrow[25] = ~kc->u.narrow[25]; \
        kc->u.narrow[34] = ~kc->u.narrow[34]; \
        kc->u.narrow[35] = ~kc->u.narrow[35]; \
        kc->u.narrow[40] = ~kc->u.narrow[40]; \
        kc->u.narrow[41] = ~kc->u.narrow[41]; \
        /* Un-interleave to get the output */ \
        for (j = 0; j < 50; j += 2) \
            UNINTERLEAVE(kc->u.narrow[j], kc->u.narrow[j + 1]); \
        for (j = 0; j < d; j += 4) \
            sph_enc32le_aligned(u.tmp + j, kc->u.narrow[j >> 2]); \
        memcpy(dst, u.tmp, d); \
        keccak_init(kc, (unsigned)d << 3); \
    }

#endif

DEFCLOSE(28, 144)
DEFCLOSE(32, 136)
DEFCLOSE(48, 104)
DEFCLOSE(64, 72)

/* see sph_keccak.h */
void
sph_keccak224_init(void *cc)
{
    keccak_init(cc, 224);
}

/* see sph_keccak.h */
void
sph_keccak224(void *cc, const void *data, size_t len)
{
    keccak_core(cc, data, len, 144);
}

/* see sph_keccak.h */
void
sph_keccak224_close(void *cc, void *dst)
{
    sph_keccak224_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_keccak.h */
void
sph_keccak224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    keccak_close28(cc, ub, n, dst);
}

/* see sph_keccak.h */
void
sph_keccak256_init(void *cc)
{
    keccak_init(cc, 256);
}

/* see sph_keccak.h */
void
sph_keccak256(void *cc, const void *data, size_t len)
{
    keccak_core(cc, data, len, 136);
}

/* see sph_keccak.h */
void
sph_keccak256_close(void *cc, void *dst)
{
    sph_keccak256_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_keccak.h */
void
sph_keccak256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    keccak_close32(cc, ub, n, dst);
}

/* see sph_keccak.h */
void
sph_keccak384_init(void *cc)
{
    keccak_init(cc, 384);
}

/* see sph_keccak.h */
void
sph_keccak384(void *cc, const void *data, size_t len)
{
    keccak_core(cc, data, len, 104);
}

/* see sph_keccak.h */
void
sph_keccak384_close(void *cc, void *dst)
{
    sph_keccak384_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_keccak.h */
void
sph_keccak384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    keccak_close48(cc, ub, n, dst);
}

/* see sph_keccak.h */
void
sph_keccak512_init(void *cc)
{
    keccak_init(cc, 512);
}

/* see sph_keccak.h */
void
sph_keccak512(void *cc, const void *data, size_t len)
{
    keccak_core(cc, data, len, 72);
}

/* see sph_keccak.h */
void
sph_keccak512_close(void *cc, void *dst)
{
    sph_keccak512_addbits_and_close(cc, 0, 0, dst);
}

/* see sph_keccak.h */
void
sph_keccak512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
{
    keccak_close64(cc, ub, n, dst);
}
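
/*
 * Minimal usage sketch of the public API declared in sph_keccak.h
 * (the destination buffer must be at least as large as the digest,
 * e.g. 32 bytes for Keccak-256):
 *
 *     sph_keccak_context kc;
 *     unsigned char digest[32];
 *
 *     sph_keccak256_init(&kc);
 *     sph_keccak256(&kc, data, data_len);   (may be called repeatedly)
 *     sph_keccak256_close(&kc, digest);     (finalize and reset)
 */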