blake2s_sse.cpp

// Based on public domain code written in 2012 by Samuel Neves
extern const byte blake2s_sigma[10][16];

// Initialization vector.
static __m128i blake2s_IV_0_3, blake2s_IV_4_7;

#ifdef _WIN_64
// Constants for cyclic rotation. Used in 64-bit mode in mm_rotr_epi32 macro.
static __m128i crotr8, crotr16;
#endif

static void blake2s_init_sse()
{
  // We cannot initialize these 128-bit variables in place when declaring
  // them globally, because global scope initialization is performed before
  // our SSE check and it would make the code incompatible with older non-SSE2
  // CPUs. Also we cannot initialize them as statics inside of a function
  // using these variables, because SSE static initialization is not thread
  // safe: the first thread starts initialization and sets the "init done"
  // flag even if it is not done yet, so a second thread can attempt to access
  // half-initialized SSE data. So we moved the init code here.
  blake2s_IV_0_3 = _mm_setr_epi32( 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A );
  blake2s_IV_4_7 = _mm_setr_epi32( 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 );

#ifdef _WIN_64
  crotr8  = _mm_set_epi8( 12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1 );
  crotr16 = _mm_set_epi8( 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2 );
#endif
}

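// Hypothetical usage sketch (not part of the original file): the caller is
// expected to verify SSE support at run time before using any of the SSE
// globals above, for example:
//
//   if (CpuSupportsSSE())    // assumed host-provided CPU feature check
//     blake2s_init_sse();    // one-time setup before the SSE compress path
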
#define LOAD(p)    _mm_load_si128( (__m128i *)(p) )
#define STORE(p,r) _mm_store_si128((__m128i *)(p), r)

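// mm_rotr_epi32(r,c) rotates every 32-bit lane of 'r' right by 'c' bits.
// The generic form combines a right and a left shift; in 64-bit mode the
// byte-aligned rotations by 8 and 16 bits are done with a single
// _mm_shuffle_epi8 using the crotr8/crotr16 byte permutation masks set up
// in blake2s_init_sse().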
#ifdef _WIN_32
// 32-bit mode has fewer SSE2 registers and in MSVC2008 it is more efficient
// not to use _mm_shuffle_epi8 here.
#define mm_rotr_epi32(r, c) ( \
  _mm_xor_si128(_mm_srli_epi32( (r), c ),_mm_slli_epi32( (r), 32-c )) )
#else
#define mm_rotr_epi32(r, c) ( \
  c==8    ? _mm_shuffle_epi8(r,crotr8) \
  : c==16 ? _mm_shuffle_epi8(r,crotr16) \
  :         _mm_xor_si128(_mm_srli_epi32( (r), c ),_mm_slli_epi32( (r), 32-c )) )
#endif

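// G1 and G2 together form one BLAKE2s G mixing step, carried out on all four
// columns (or diagonals) of the state at once: add a message word, then XOR
// and rotate by 16 and 12 bits (G1) and by 8 and 7 bits (G2), the rotation
// constants of the BLAKE2s specification.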
#define G1(row1,row2,row3,row4,buf) \
  row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \
  row4 = _mm_xor_si128( row4, row1 ); \
  row4 = mm_rotr_epi32(row4, 16); \
  row3 = _mm_add_epi32( row3, row4 ); \
  row2 = _mm_xor_si128( row2, row3 ); \
  row2 = mm_rotr_epi32(row2, 12);

#define G2(row1,row2,row3,row4,buf) \
  row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \
  row4 = _mm_xor_si128( row4, row1 ); \
  row4 = mm_rotr_epi32(row4, 8); \
  row3 = _mm_add_epi32( row3, row4 ); \
  row2 = _mm_xor_si128( row2, row3 ); \
  row2 = mm_rotr_epi32(row2, 7);

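// Rotate rows 2..4 of the 4x4 state so that its diagonals line up in the SSE
// lanes; the same G1/G2 macros then mix the diagonals, and UNDIAGONALIZE
// shuffles the rows back to column order.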
#define DIAGONALIZE(row1,row2,row3,row4) \
  row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(2,1,0,3) ); \
  row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(1,0,3,2) ); \
  row2 = _mm_shuffle_epi32( row2, _MM_SHUFFLE(0,3,2,1) );

#define UNDIAGONALIZE(row1,row2,row3,row4) \
  row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(0,3,2,1) ); \
  row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(1,0,3,2) ); \
  row2 = _mm_shuffle_epi32( row2, _MM_SHUFFLE(2,1,0,3) );

#ifdef _WIN_64
// MSVC 2008 in x64 mode expands _mm_set_epi32 to store-to-stack and
// load-from-stack operations, which are slower than this code.
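// The two inner unpacks pair up (i0,i2) and (i1,i3) in registers; the outer
// unpack interleaves them, producing lanes (i0,i1,i2,i3) without touching
// memory.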
#define _mm_set_epi32(i3,i2,i1,i0) \
  _mm_unpacklo_epi32(_mm_unpacklo_epi32(_mm_cvtsi32_si128(i0),_mm_cvtsi32_si128(i2)), \
                     _mm_unpacklo_epi32(_mm_cvtsi32_si128(i1),_mm_cvtsi32_si128(i3)))
#endif

// Original BLAKE2 SSE4.1 message loading code was a little slower in x86 mode
// and about the same in x64 mode in our test. Perhaps it depends on the compiler.
// We also tried the _mm_i32gather_epi32 and _mm256_i32gather_epi32 AVX2 gather
// instructions here, but they did not show any speed gain on i7-6700K.
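// SSE_ROUND performs one full BLAKE2s round 'r': gather the message words
// selected by the blake2s_sigma permutation for this round, mix the columns
// with G1/G2, then diagonalize, mix the diagonals and restore column order.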
#define SSE_ROUND(m,row,r) \
{ \
  __m128i buf; \
  buf=_mm_set_epi32(m[blake2s_sigma[r][6]],m[blake2s_sigma[r][4]],m[blake2s_sigma[r][2]],m[blake2s_sigma[r][0]]); \
  G1(row[0],row[1],row[2],row[3],buf); \
  buf=_mm_set_epi32(m[blake2s_sigma[r][7]],m[blake2s_sigma[r][5]],m[blake2s_sigma[r][3]],m[blake2s_sigma[r][1]]); \
  G2(row[0],row[1],row[2],row[3],buf); \
  DIAGONALIZE(row[0],row[1],row[2],row[3]); \
  buf=_mm_set_epi32(m[blake2s_sigma[r][14]],m[blake2s_sigma[r][12]],m[blake2s_sigma[r][10]],m[blake2s_sigma[r][8]]); \
  G1(row[0],row[1],row[2],row[3],buf); \
  buf=_mm_set_epi32(m[blake2s_sigma[r][15]],m[blake2s_sigma[r][13]],m[blake2s_sigma[r][11]],m[blake2s_sigma[r][9]]); \
  G2(row[0],row[1],row[2],row[3],buf); \
  UNDIAGONALIZE(row[0],row[1],row[2],row[3]); \
}

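// Compress one BLAKE2S_BLOCKBYTES input block into the hash state S->h.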
static int blake2s_compress_sse( blake2s_state *S, const byte block[BLAKE2S_BLOCKBYTES] )
{
  __m128i row[4];
  __m128i ff0, ff1;

  const uint32 *m = ( uint32 * )block;

  row[0] = ff0 = LOAD( &S->h[0] );
  row[1] = ff1 = LOAD( &S->h[4] );

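  // Rows 2 and 3 of the working state start from the initialization vector.
  // Row 3 is XORed with the block counter and finalization flags; this single
  // 128-bit load assumes t[0..1] and f[0..1] are laid out contiguously in
  // blake2s_state.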
  row[2] = blake2s_IV_0_3;
  row[3] = _mm_xor_si128( blake2s_IV_4_7, LOAD( &S->t[0] ) );

  SSE_ROUND( m, row, 0 );
  SSE_ROUND( m, row, 1 );
  SSE_ROUND( m, row, 2 );
  SSE_ROUND( m, row, 3 );
  SSE_ROUND( m, row, 4 );
  SSE_ROUND( m, row, 5 );
  SSE_ROUND( m, row, 6 );
  SSE_ROUND( m, row, 7 );
  SSE_ROUND( m, row, 8 );
  SSE_ROUND( m, row, 9 );

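  // Feed-forward: XOR the two halves of the working state into the saved
  // input chaining values ff0/ff1 and store the result as the new hash state.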
  STORE( &S->h[0], _mm_xor_si128( ff0, _mm_xor_si128( row[0], row[2] ) ) );
  STORE( &S->h[4], _mm_xor_si128( ff1, _mm_xor_si128( row[1], row[3] ) ) );

  return 0;
}