Dash Core Source Documentation (0.16.0.1)

Find detailed information regarding the Dash Core source code.

sph_types.h
Go to the documentation of this file.
1 /* $Id: sph_types.h 260 2011-07-21 01:02:38Z tp $ */
47 #ifndef SPH_TYPES_H__
48 #define SPH_TYPES_H__
49 
50 #include <limits.h>
51 
52 /*
53  * All our I/O functions are defined over octet streams. We do not know
54  * how to handle input data if bytes are not octets.
55  */
56 #if CHAR_BIT != 8
57 #error This code requires 8-bit bytes
58 #endif
59 
60 /* ============= BEGIN documentation block for Doxygen ============ */
61 
62 #ifdef DOXYGEN_IGNORE
63 
384 typedef __arch_dependant__ sph_u32;
385 
390 typedef __arch_dependant__ sph_s32;
391 
402 typedef __arch_dependant__ sph_u64;
403 
408 typedef __arch_dependant__ sph_s64;
409 
418 #define SPH_C32(x)
419 
426 #define SPH_T32(x)
427 
438 #define SPH_ROTL32(x, n)
439 
450 #define SPH_ROTR32(x, n)
451 
456 #define SPH_64
457 
462 #define SPH_64_TRUE
463 
473 #define SPH_C64(x)
474 
482 #define SPH_T64(x)
483 
495 #define SPH_ROTL64(x, n)
496 
508 #define SPH_ROTR64(x, n)
509 
517 #define SPH_INLINE
518 
525 #define SPH_LITTLE_ENDIAN
526 
533 #define SPH_BIG_ENDIAN
534 
542 #define SPH_LITTLE_FAST
543 
551 #define SPH_BIG_FAST
552 
559 #define SPH_UPTR
560 
566 #define SPH_UNALIGNED
567 
576 static inline sph_u32 sph_bswap32(sph_u32 x);
577 
587 static inline sph_u64 sph_bswap64(sph_u64 x);
588 
596 static inline unsigned sph_dec16le(const void *src);
597 
605 static inline void sph_enc16le(void *dst, unsigned val);
606 
614 static inline unsigned sph_dec16be(const void *src);
615 
623 static inline void sph_enc16be(void *dst, unsigned val);
624 
632 static inline sph_u32 sph_dec32le(const void *src);
633 
644 static inline sph_u32 sph_dec32le_aligned(const void *src);
645 
653 static inline void sph_enc32le(void *dst, sph_u32 val);
654 
665 static inline void sph_enc32le_aligned(void *dst, sph_u32 val);
666 
674 static inline sph_u32 sph_dec32be(const void *src);
675 
686 static inline sph_u32 sph_dec32be_aligned(const void *src);
687 
695 static inline void sph_enc32be(void *dst, sph_u32 val);
696 
707 static inline void sph_enc32be_aligned(void *dst, sph_u32 val);
708 
717 static inline sph_u64 sph_dec64le(const void *src);
718 
730 static inline sph_u64 sph_dec64le_aligned(const void *src);
731 
740 static inline void sph_enc64le(void *dst, sph_u64 val);
741 
754 static inline void sph_enc64le_aligned(void *dst, sph_u64 val);
755 
764 static inline sph_u64 sph_dec64be(const void *src);
765 
777 static inline sph_u64 sph_dec64be_aligned(const void *src);
778 
787 static inline void sph_enc64be(void *dst, sph_u64 val);
788 
801 static inline void sph_enc64be_aligned(void *dst, sph_u64 val);
802 
803 #endif
804 
805 /* ============== END documentation block for Doxygen ============= */
806 
807 #ifndef DOXYGEN_IGNORE
808 
809 /*
810  * We want to define the types "sph_u32" and "sph_u64" which hold
811  * unsigned values of at least, respectively, 32 and 64 bits. These
812  * tests should select appropriate types for most platforms. The
813  * macro "SPH_64" is defined if the 64-bit is supported.
814  */
815 
816 #undef SPH_64
817 #undef SPH_64_TRUE
818 
819 #if defined __STDC__ && __STDC_VERSION__ >= 199901L
820 
821 /*
822  * On C99 implementations, we can use <stdint.h> to get an exact 64-bit
823  * type, if any, or otherwise use a wider type (which must exist, for
824  * C99 conformance).
825  */
826 
827 #include <stdint.h>
828 
829 #ifdef UINT32_MAX
830 typedef uint32_t sph_u32;
831 typedef int32_t sph_s32;
832 #else
833 typedef uint_fast32_t sph_u32;
834 typedef int_fast32_t sph_s32;
835 #endif
836 #if !SPH_NO_64
837 #ifdef UINT64_MAX
838 typedef uint64_t sph_u64;
839 typedef int64_t sph_s64;
840 #else
841 typedef uint_fast64_t sph_u64;
842 typedef int_fast64_t sph_s64;
843 #endif
844 #endif
845 
846 #define SPH_C32(x) ((sph_u32)(x))
847 #if !SPH_NO_64
848 #define SPH_C64(x) ((sph_u64)(x))
849 #define SPH_64 1
850 #endif
851 
852 #else
853 
854 /*
855  * On non-C99 systems, we use "unsigned int" if it is wide enough,
856  * "unsigned long" otherwise. This supports all "reasonable" architectures.
857  * We have to be cautious: pre-C99 preprocessors handle constants
858  * differently in '#if' expressions. Hence the shifts to test UINT_MAX.
859  */
860 
861 #if ((UINT_MAX >> 11) >> 11) >= 0x3FF
862 
863 typedef unsigned int sph_u32;
864 typedef int sph_s32;
865 
866 #define SPH_C32(x) ((sph_u32)(x ## U))
867 
868 #else
869 
870 typedef unsigned long sph_u32;
871 typedef long sph_s32;
872 
873 #define SPH_C32(x) ((sph_u32)(x ## UL))
874 
875 #endif
876 
877 #if !SPH_NO_64
878 
879 /*
880  * We want a 64-bit type. We use "unsigned long" if it is wide enough (as
881  * is common on 64-bit architectures such as AMD64, Alpha or Sparcv9),
882  * "unsigned long long" otherwise, if available. We use ULLONG_MAX to
883  * test whether "unsigned long long" is available; we also know that
884  * gcc features this type, even if the libc header do not know it.
885  */
886 
887 #if ((ULONG_MAX >> 31) >> 31) >= 3
888 
889 typedef unsigned long sph_u64;
890 typedef long sph_s64;
891 
892 #define SPH_C64(x) ((sph_u64)(x ## UL))
893 
894 #define SPH_64 1
895 
896 #elif ((ULLONG_MAX >> 31) >> 31) >= 3 || defined __GNUC__
897 
898 typedef unsigned long long sph_u64;
899 typedef long long sph_s64;
900 
901 #define SPH_C64(x) ((sph_u64)(x ## ULL))
902 
903 #define SPH_64 1
904 
905 #else
906 
907 /*
908  * No 64-bit type...
909  */
910 
911 #endif
912 
913 #endif
914 
915 #endif
916 
917 /*
918  * If the "unsigned long" type has length 64 bits or more, then this is
919  * a "true" 64-bit architectures. This is also true with Visual C on
920  * amd64, even though the "long" type is limited to 32 bits.
921  */
922 #if SPH_64 && (((ULONG_MAX >> 31) >> 31) >= 3 || defined _M_X64)
923 #define SPH_64_TRUE 1
924 #endif
925 
926 /*
927  * Implementation note: some processors have specific opcodes to perform
928  * a rotation. Recent versions of gcc recognize the expression above and
929  * use the relevant opcodes, when appropriate.
930  */
931 
932 #define SPH_T32(x) ((x) & SPH_C32(0xFFFFFFFF))
933 #define SPH_ROTL32(x, n) SPH_T32(((x) << (n)) | ((x) >> (32 - (n))))
934 #define SPH_ROTR32(x, n) SPH_ROTL32(x, (32 - (n)))
935 
936 #if SPH_64
937 
938 #define SPH_T64(x) ((x) & SPH_C64(0xFFFFFFFFFFFFFFFF))
939 #define SPH_ROTL64(x, n) SPH_T64(((x) << (n)) | ((x) >> (64 - (n))))
940 #define SPH_ROTR64(x, n) SPH_ROTL64(x, (64 - (n)))
941 
942 #endif
943 
944 #ifndef DOXYGEN_IGNORE
945 /*
946  * Define SPH_INLINE to be an "inline" qualifier, if available. We define
947  * some small macro-like functions which benefit greatly from being inlined.
948  */
949 #if (defined __STDC__ && __STDC_VERSION__ >= 199901L) || defined __GNUC__
950 #define SPH_INLINE inline
951 #elif defined _MSC_VER
952 #define SPH_INLINE __inline
953 #else
954 #define SPH_INLINE
955 #endif
956 #endif
957 
958 /*
959  * We define some macros which qualify the architecture. These macros
960  * may be explicit set externally (e.g. as compiler parameters). The
961  * code below sets those macros if they are not already defined.
962  *
963  * Most macros are boolean, thus evaluate to either zero or non-zero.
964  * The SPH_UPTR macro is special, in that it evaluates to a C type,
965  * or is not defined.
966  *
967  * SPH_UPTR if defined: unsigned type to cast pointers into
968  *
969  * SPH_UNALIGNED non-zero if unaligned accesses are efficient
970  * SPH_LITTLE_ENDIAN non-zero if architecture is known to be little-endian
971  * SPH_BIG_ENDIAN non-zero if architecture is known to be big-endian
972  * SPH_LITTLE_FAST non-zero if little-endian decoding is fast
973  * SPH_BIG_FAST non-zero if big-endian decoding is fast
974  *
975  * If SPH_UPTR is defined, then encoding and decoding of 32-bit and 64-bit
976  * values will try to be "smart". Either SPH_LITTLE_ENDIAN or SPH_BIG_ENDIAN
977  * _must_ be non-zero in those situations. The 32-bit and 64-bit types
978  * _must_ also have an exact width.
979  *
980  * SPH_SPARCV9_GCC_32 UltraSPARC-compatible with gcc, 32-bit mode
981  * SPH_SPARCV9_GCC_64 UltraSPARC-compatible with gcc, 64-bit mode
982  * SPH_SPARCV9_GCC UltraSPARC-compatible with gcc
983  * SPH_I386_GCC x86-compatible (32-bit) with gcc
984  * SPH_I386_MSVC x86-compatible (32-bit) with Microsoft Visual C
985  * SPH_AMD64_GCC x86-compatible (64-bit) with gcc
986  * SPH_AMD64_MSVC x86-compatible (64-bit) with Microsoft Visual C
987  * SPH_PPC32_GCC PowerPC, 32-bit, with gcc
988  * SPH_PPC64_GCC PowerPC, 64-bit, with gcc
989  *
990  * TODO: enhance automatic detection, for more architectures and compilers.
991  * Endianness is the most important. SPH_UNALIGNED and SPH_UPTR help with
992  * some very fast functions (e.g. MD4) when using unaligned input data.
993  * The CPU-specific-with-GCC macros are useful only for inline assembly,
994  * normally restrained to this header file.
995  */
996 
997 /*
998  * 32-bit x86, aka "i386 compatible".
999  */
1000 #if defined __i386__ || defined _M_IX86
1001 
1002 #define SPH_DETECT_UNALIGNED 1
1003 #define SPH_DETECT_LITTLE_ENDIAN 1
1004 #define SPH_DETECT_UPTR sph_u32
1005 #ifdef __GNUC__
1006 #define SPH_DETECT_I386_GCC 1
1007 #endif
1008 #ifdef _MSC_VER
1009 #define SPH_DETECT_I386_MSVC 1
1010 #endif
1011 
1012 /*
1013  * 64-bit x86, hereafter known as "amd64".
1014  */
1015 #elif defined __x86_64 || defined _M_X64
1016 
1017 #define SPH_DETECT_UNALIGNED 1
1018 #define SPH_DETECT_LITTLE_ENDIAN 1
1019 #define SPH_DETECT_UPTR sph_u64
1020 #ifdef __GNUC__
1021 #define SPH_DETECT_AMD64_GCC 1
1022 #endif
1023 #ifdef _MSC_VER
1024 #define SPH_DETECT_AMD64_MSVC 1
1025 #endif
1026 
1027 /*
1028  * 64-bit Sparc architecture (implies v9).
1029  */
1030 #elif ((defined __sparc__ || defined __sparc) && defined __arch64__) \
1031  || defined __sparcv9
1032 
1033 #define SPH_DETECT_BIG_ENDIAN 1
1034 #define SPH_DETECT_UPTR sph_u64
1035 #ifdef __GNUC__
1036 #define SPH_DETECT_SPARCV9_GCC_64 1
1037 #define SPH_DETECT_LITTLE_FAST 1
1038 #endif
1039 
1040 /*
1041  * 32-bit Sparc.
1042  */
1043 #elif (defined __sparc__ || defined __sparc) \
1044  && !(defined __sparcv9 || defined __arch64__)
1045 
1046 #define SPH_DETECT_BIG_ENDIAN 1
1047 #define SPH_DETECT_UPTR sph_u32
1048 #if defined __GNUC__ && defined __sparc_v9__
1049 #define SPH_DETECT_SPARCV9_GCC_32 1
1050 #define SPH_DETECT_LITTLE_FAST 1
1051 #endif
1052 
1053 /*
1054  * ARM, little-endian.
1055  */
1056 #elif defined __arm__ && __ARMEL__
1057 
1058 #define SPH_DETECT_LITTLE_ENDIAN 1
1059 
1060 /*
1061  * MIPS, little-endian.
1062  */
1063 #elif MIPSEL || _MIPSEL || __MIPSEL || __MIPSEL__
1064 
1065 #define SPH_DETECT_LITTLE_ENDIAN 1
1066 
1067 /*
1068  * MIPS, big-endian.
1069  */
1070 #elif MIPSEB || _MIPSEB || __MIPSEB || __MIPSEB__
1071 
1072 #define SPH_DETECT_BIG_ENDIAN 1
1073 
1074 /*
1075  * PowerPC.
1076  */
1077 #elif defined __powerpc__ || defined __POWERPC__ || defined __ppc__ \
1078  || defined _ARCH_PPC
1079 
1080 /*
1081  * Note: we do not declare cross-endian access to be "fast": even if
1082  * using inline assembly, implementation should still assume that
1083  * keeping the decoded word in a temporary is faster than decoding
1084  * it again.
1085  */
1086 #if defined __GNUC__
1087 #if SPH_64_TRUE
1088 #define SPH_DETECT_PPC64_GCC 1
1089 #else
1090 #define SPH_DETECT_PPC32_GCC 1
1091 #endif
1092 #endif
1093 
1094 #if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
1095 #define SPH_DETECT_BIG_ENDIAN 1
1096 #elif defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
1097 #define SPH_DETECT_LITTLE_ENDIAN 1
1098 #endif
1099 
1100 /*
1101  * Itanium, 64-bit.
1102  */
1103 #elif defined __ia64 || defined __ia64__ \
1104  || defined __itanium__ || defined _M_IA64
1105 
1106 #if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
1107 #define SPH_DETECT_BIG_ENDIAN 1
1108 #else
1109 #define SPH_DETECT_LITTLE_ENDIAN 1
1110 #endif
1111 #if defined __LP64__ || defined _LP64
1112 #define SPH_DETECT_UPTR sph_u64
1113 #else
1114 #define SPH_DETECT_UPTR sph_u32
1115 #endif
1116 
1117 #endif
1118 
1119 #if defined SPH_DETECT_SPARCV9_GCC_32 || defined SPH_DETECT_SPARCV9_GCC_64
1120 #define SPH_DETECT_SPARCV9_GCC 1
1121 #endif
1122 
1123 #if defined SPH_DETECT_UNALIGNED && !defined SPH_UNALIGNED
1124 #define SPH_UNALIGNED SPH_DETECT_UNALIGNED
1125 #endif
1126 #if defined SPH_DETECT_UPTR && !defined SPH_UPTR
1127 #define SPH_UPTR SPH_DETECT_UPTR
1128 #endif
1129 #if defined SPH_DETECT_LITTLE_ENDIAN && !defined SPH_LITTLE_ENDIAN
1130 #define SPH_LITTLE_ENDIAN SPH_DETECT_LITTLE_ENDIAN
1131 #endif
1132 #if defined SPH_DETECT_BIG_ENDIAN && !defined SPH_BIG_ENDIAN
1133 #define SPH_BIG_ENDIAN SPH_DETECT_BIG_ENDIAN
1134 #endif
1135 #if defined SPH_DETECT_LITTLE_FAST && !defined SPH_LITTLE_FAST
1136 #define SPH_LITTLE_FAST SPH_DETECT_LITTLE_FAST
1137 #endif
1138 #if defined SPH_DETECT_BIG_FAST && !defined SPH_BIG_FAST
1139 #define SPH_BIG_FAST SPH_DETECT_BIG_FAST
1140 #endif
1141 #if defined SPH_DETECT_SPARCV9_GCC_32 && !defined SPH_SPARCV9_GCC_32
1142 #define SPH_SPARCV9_GCC_32 SPH_DETECT_SPARCV9_GCC_32
1143 #endif
1144 #if defined SPH_DETECT_SPARCV9_GCC_64 && !defined SPH_SPARCV9_GCC_64
1145 #define SPH_SPARCV9_GCC_64 SPH_DETECT_SPARCV9_GCC_64
1146 #endif
1147 #if defined SPH_DETECT_SPARCV9_GCC && !defined SPH_SPARCV9_GCC
1148 #define SPH_SPARCV9_GCC SPH_DETECT_SPARCV9_GCC
1149 #endif
1150 #if defined SPH_DETECT_I386_GCC && !defined SPH_I386_GCC
1151 #define SPH_I386_GCC SPH_DETECT_I386_GCC
1152 #endif
1153 #if defined SPH_DETECT_I386_MSVC && !defined SPH_I386_MSVC
1154 #define SPH_I386_MSVC SPH_DETECT_I386_MSVC
1155 #endif
1156 #if defined SPH_DETECT_AMD64_GCC && !defined SPH_AMD64_GCC
1157 #define SPH_AMD64_GCC SPH_DETECT_AMD64_GCC
1158 #endif
1159 #if defined SPH_DETECT_AMD64_MSVC && !defined SPH_AMD64_MSVC
1160 #define SPH_AMD64_MSVC SPH_DETECT_AMD64_MSVC
1161 #endif
1162 #if defined SPH_DETECT_PPC32_GCC && !defined SPH_PPC32_GCC
1163 #define SPH_PPC32_GCC SPH_DETECT_PPC32_GCC
1164 #endif
1165 #if defined SPH_DETECT_PPC64_GCC && !defined SPH_PPC64_GCC
1166 #define SPH_PPC64_GCC SPH_DETECT_PPC64_GCC
1167 #endif
1168 
1169 #if SPH_LITTLE_ENDIAN && !defined SPH_LITTLE_FAST
1170 #define SPH_LITTLE_FAST 1
1171 #endif
1172 #if SPH_BIG_ENDIAN && !defined SPH_BIG_FAST
1173 #define SPH_BIG_FAST 1
1174 #endif
1175 
1176 #if defined SPH_UPTR && !(SPH_LITTLE_ENDIAN || SPH_BIG_ENDIAN)
1177 #error SPH_UPTR defined, but endianness is not known.
1178 #endif
1179 
1180 #if SPH_I386_GCC && !SPH_NO_ASM
1181 
1182 /*
1183  * On x86 32-bit, with gcc, we use the bswapl opcode to byte-swap 32-bit
1184  * values.
1185  */
1186 
/* Byte-swap a 32-bit value in place with the x86 "bswapl" opcode (32-bit gcc).
   NOTE(review): the declarator line 1188 was dropped by the page extraction;
   restored from the cross-reference index below. */
1187 static SPH_INLINE sph_u32
1188 sph_bswap32(sph_u32 x)
1189 {
1190  __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
1191  return x;
1192 }
1193 
1194 #if SPH_64
1195 
/* Byte-swap a 64-bit value by swapping each 32-bit half with sph_bswap32
   and exchanging the halves (used on 32-bit x86, which has no 64-bit bswap). */
1196 static SPH_INLINE sph_u64
1197 sph_bswap64(sph_u64 x)
1198 {
1199  return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
1200  | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
1201 }
1202 
1203 #endif
1204 
1205 #elif SPH_AMD64_GCC && !SPH_NO_ASM
1206 
1207 /*
1208  * On x86 64-bit, with gcc, we use the bswapl opcode to byte-swap 32-bit
1209  * and 64-bit values.
1210  */
1211 
/* Byte-swap a 32-bit value in place with the x86 "bswapl" opcode (64-bit gcc).
   NOTE(review): the declarator line 1213 was dropped by the page extraction;
   restored to match the i386 variant above. */
1212 static SPH_INLINE sph_u32
1213 sph_bswap32(sph_u32 x)
1214 {
1215  __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
1216  return x;
1217 }
1218 
1219 #if SPH_64
1220 
/* Byte-swap a 64-bit value in place with the x86-64 "bswapq" opcode. */
1221 static SPH_INLINE sph_u64
1222 sph_bswap64(sph_u64 x)
1223 {
1224  __asm__ __volatile__ ("bswapq %0" : "=r" (x) : "0" (x));
1225  return x;
1226 }
1227 
1228 #endif
1229 
1230 /*
1231  * Disabled code. Apparently, Microsoft Visual C 2005 is smart enough
1232  * to generate proper opcodes for endianness swapping with the pure C
1233  * implementation below.
1234  *
1235 
1236 #elif SPH_I386_MSVC && !SPH_NO_ASM
1237 
1238 static __inline sph_u32 __declspec(naked) __fastcall
1239 sph_bswap32(sph_u32 x)
1240 {
1241  __asm {
1242  bswap ecx
1243  mov eax,ecx
1244  ret
1245  }
1246 }
1247 
1248 #if SPH_64
1249 
1250 static SPH_INLINE sph_u64
1251 sph_bswap64(sph_u64 x)
1252 {
1253  return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
1254  | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
1255 }
1256 
1257 #endif
1258 
1259  *
1260  * [end of disabled code]
1261  */
1262 
1263 #else
1264 
/* Portable byte-swap of a 32-bit value: rotate by 16, then swap bytes
   within each 16-bit half. No inline assembly required.
   NOTE(review): the declarator line 1266 was dropped by the page extraction;
   restored from the index entry "sph_bswap32 ... Definition: sph_types.h:1266". */
1265 static SPH_INLINE sph_u32
1266 sph_bswap32(sph_u32 x)
1267 {
1268  x = SPH_T32((x << 16) | (x >> 16));
1269  x = ((x & SPH_C32(0xFF00FF00)) >> 8)
1270  | ((x & SPH_C32(0x00FF00FF)) << 8);
1271  return x;
1272 }
1273 
1274 #if SPH_64
1275 
/* Portable byte-swap of a 64-bit value: swap the 32-bit halves, then the
   16-bit groups, then the bytes within each 16-bit group. */
1282 static SPH_INLINE sph_u64
1283 sph_bswap64(sph_u64 x)
1284 {
1285  x = SPH_T64((x << 32) | (x >> 32));
1286  x = ((x & SPH_C64(0xFFFF0000FFFF0000)) >> 16)
1287  | ((x & SPH_C64(0x0000FFFF0000FFFF)) << 16);
1288  x = ((x & SPH_C64(0xFF00FF00FF00FF00)) >> 8)
1289  | ((x & SPH_C64(0x00FF00FF00FF00FF)) << 8);
1290  return x;
1291 }
1292 
1293 #endif
1294 
1295 #endif
1296 
1297 #if SPH_SPARCV9_GCC && !SPH_NO_ASM
1298 
1299 /*
1300  * On UltraSPARC systems, native ordering is big-endian, but it is
1301  * possible to perform little-endian read accesses by specifying the
1302  * address space 0x88 (ASI_PRIMARY_LITTLE). Basically, either we use
1303  * the opcode "lda [%reg]0x88,%dst", where %reg is the register which
1304  * contains the source address and %dst is the destination register,
1305  * or we use "lda [%reg+imm]%asi,%dst", which uses the %asi register
1306  * to get the address space name. The latter format is better since it
1307  * combines an addition and the actual access in a single opcode; but
1308  * it requires the setting (and subsequent resetting) of %asi, which is
1309  * slow. Some operations (i.e. MD5 compression function) combine many
1310  * successive little-endian read accesses, which may share the same
1311  * %asi setting. The macros below contain the appropriate inline
1312  * assembly.
1313  */
1314 
1315 #define SPH_SPARCV9_SET_ASI \
1316  sph_u32 sph_sparcv9_asi; \
1317  __asm__ __volatile__ ( \
1318  "rd %%asi,%0\n\twr %%g0,0x88,%%asi" : "=r" (sph_sparcv9_asi));
1319 
1320 #define SPH_SPARCV9_RESET_ASI \
1321  __asm__ __volatile__ ("wr %%g0,%0,%%asi" : : "r" (sph_sparcv9_asi));
1322 
1323 #define SPH_SPARCV9_DEC32LE(base, idx) ({ \
1324  sph_u32 sph_sparcv9_tmp; \
1325  __asm__ __volatile__ ("lda [%1+" #idx "*4]%%asi,%0" \
1326  : "=r" (sph_sparcv9_tmp) : "r" (base)); \
1327  sph_sparcv9_tmp; \
1328  })
1329 
1330 #endif
1331 
/* Encode a 16-bit value into dst, big-endian (most significant byte first). */
1332 static SPH_INLINE void
1333 sph_enc16be(void *dst, unsigned val)
1334 {
1335  ((unsigned char *)dst)[0] = (val >> 8);
1336  ((unsigned char *)dst)[1] = val;
1337 }
1338 
/* Decode a 16-bit big-endian value from src. */
1339 static SPH_INLINE unsigned
1340 sph_dec16be(const void *src)
1341 {
1342  return ((unsigned)(((const unsigned char *)src)[0]) << 8)
1343  | (unsigned)(((const unsigned char *)src)[1]);
1344 }
1345 
/* Encode a 16-bit value into dst, little-endian (least significant byte first). */
1346 static SPH_INLINE void
1347 sph_enc16le(void *dst, unsigned val)
1348 {
1349  ((unsigned char *)dst)[0] = val;
1350  ((unsigned char *)dst)[1] = val >> 8;
1351 }
1352 
/* Decode a 16-bit little-endian value from src. */
1353 static SPH_INLINE unsigned
1354 sph_dec16le(const void *src)
1355 {
1356  return (unsigned)(((const unsigned char *)src)[0])
1357  | ((unsigned)(((const unsigned char *)src)[1]) << 8);
1358 }
1359 
/* Encode a 32-bit value into dst, big-endian, with no alignment requirement.
   When SPH_UPTR is defined, a whole-word store (preceded by a byte swap on
   little-endian hosts) is used if unaligned access is allowed or the pointer
   is 4-byte aligned; otherwise, and on unknown platforms, bytes are stored
   one at a time. */
1366 static SPH_INLINE void
1367 sph_enc32be(void *dst, sph_u32 val)
1368 {
1369 #if defined SPH_UPTR
1370 #if SPH_UNALIGNED
1371 #if SPH_LITTLE_ENDIAN
1372  val = sph_bswap32(val);
1373 #endif
1374  *(sph_u32 *)dst = val;
1375 #else
1376  if (((SPH_UPTR)dst & 3) == 0) {
1377 #if SPH_LITTLE_ENDIAN
1378  val = sph_bswap32(val);
1379 #endif
1380  *(sph_u32 *)dst = val;
1381  } else {
1382  ((unsigned char *)dst)[0] = (val >> 24);
1383  ((unsigned char *)dst)[1] = (val >> 16);
1384  ((unsigned char *)dst)[2] = (val >> 8);
1385  ((unsigned char *)dst)[3] = val;
1386  }
1387 #endif
1388 #else
1389  ((unsigned char *)dst)[0] = (val >> 24);
1390  ((unsigned char *)dst)[1] = (val >> 16);
1391  ((unsigned char *)dst)[2] = (val >> 8);
1392  ((unsigned char *)dst)[3] = val;
1393 #endif
1394 }
1395 
/* Encode a 32-bit value into a 32-bit-aligned buffer, big-endian order.
   NOTE(review): the declarator line 1404 was dropped by the page extraction;
   restored from the index entry "sph_enc32be_aligned ... Definition: sph_types.h:1404". */
1403 static SPH_INLINE void
1404 sph_enc32be_aligned(void *dst, sph_u32 val)
1405 {
1406 #if SPH_LITTLE_ENDIAN
1407  *(sph_u32 *)dst = sph_bswap32(val);
1408 #elif SPH_BIG_ENDIAN
1409  *(sph_u32 *)dst = val;
1410 #else
1411  ((unsigned char *)dst)[0] = (val >> 24);
1412  ((unsigned char *)dst)[1] = (val >> 16);
1413  ((unsigned char *)dst)[2] = (val >> 8);
1414  ((unsigned char *)dst)[3] = val;
1415 #endif
1416 }
1417 
/* Decode a 32-bit big-endian value from src, with no alignment requirement.
   Mirrors sph_enc32be: whole-word load (byte-swapped on little-endian hosts)
   when SPH_UPTR is defined and access is aligned or unaligned access is
   allowed; byte-wise assembly otherwise. */
1424 static SPH_INLINE sph_u32
1425 sph_dec32be(const void *src)
1426 {
1427 #if defined SPH_UPTR
1428 #if SPH_UNALIGNED
1429 #if SPH_LITTLE_ENDIAN
1430  return sph_bswap32(*(const sph_u32 *)src);
1431 #else
1432  return *(const sph_u32 *)src;
1433 #endif
1434 #else
1435  if (((SPH_UPTR)src & 3) == 0) {
1436 #if SPH_LITTLE_ENDIAN
1437  return sph_bswap32(*(const sph_u32 *)src);
1438 #else
1439  return *(const sph_u32 *)src;
1440 #endif
1441  } else {
1442  return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
1443  | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
1444  | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
1445  | (sph_u32)(((const unsigned char *)src)[3]);
1446  }
1447 #endif
1448 #else
1449  return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
1450  | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
1451  | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
1452  | (sph_u32)(((const unsigned char *)src)[3]);
1453 #endif
1454 }
1455 
/* Decode a 32-bit big-endian value from a 32-bit-aligned buffer. */
1463 static SPH_INLINE sph_u32
1464 sph_dec32be_aligned(const void *src)
1465 {
1466 #if SPH_LITTLE_ENDIAN
1467  return sph_bswap32(*(const sph_u32 *)src);
1468 #elif SPH_BIG_ENDIAN
1469  return *(const sph_u32 *)src;
1470 #else
1471  return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
1472  | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
1473  | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
1474  | (sph_u32)(((const unsigned char *)src)[3]);
1475 #endif
1476 }
1477 
/* Encode a 32-bit value into dst, little-endian, with no alignment
   requirement. Whole-word store (byte-swapped on big-endian hosts) when
   SPH_UPTR is defined and the access is safe; byte-wise stores otherwise. */
1484 static SPH_INLINE void
1485 sph_enc32le(void *dst, sph_u32 val)
1486 {
1487 #if defined SPH_UPTR
1488 #if SPH_UNALIGNED
1489 #if SPH_BIG_ENDIAN
1490  val = sph_bswap32(val);
1491 #endif
1492  *(sph_u32 *)dst = val;
1493 #else
1494  if (((SPH_UPTR)dst & 3) == 0) {
1495 #if SPH_BIG_ENDIAN
1496  val = sph_bswap32(val);
1497 #endif
1498  *(sph_u32 *)dst = val;
1499  } else {
1500  ((unsigned char *)dst)[0] = val;
1501  ((unsigned char *)dst)[1] = (val >> 8);
1502  ((unsigned char *)dst)[2] = (val >> 16);
1503  ((unsigned char *)dst)[3] = (val >> 24);
1504  }
1505 #endif
1506 #else
1507  ((unsigned char *)dst)[0] = val;
1508  ((unsigned char *)dst)[1] = (val >> 8);
1509  ((unsigned char *)dst)[2] = (val >> 16);
1510  ((unsigned char *)dst)[3] = (val >> 24);
1511 #endif
1512 }
1513 
/* Encode a 32-bit value into a 32-bit-aligned buffer, little-endian order.
   NOTE(review): the declarator line 1522 was dropped by the page extraction;
   restored from the index entry "sph_enc32le_aligned ... Definition: sph_types.h:1522". */
1521 static SPH_INLINE void
1522 sph_enc32le_aligned(void *dst, sph_u32 val)
1523 {
1524 #if SPH_LITTLE_ENDIAN
1525  *(sph_u32 *)dst = val;
1526 #elif SPH_BIG_ENDIAN
1527  *(sph_u32 *)dst = sph_bswap32(val);
1528 #else
1529  ((unsigned char *)dst)[0] = val;
1530  ((unsigned char *)dst)[1] = (val >> 8);
1531  ((unsigned char *)dst)[2] = (val >> 16);
1532  ((unsigned char *)dst)[3] = (val >> 24);
1533 #endif
1534 }
1535 
/* Decode a 32-bit little-endian value from src, with no alignment
   requirement. On aligned big-endian UltraSPARC with gcc, a single "lda"
   access in the little-endian address space (0x88) avoids the swap;
   otherwise a word load plus sph_bswap32, or byte-wise assembly when the
   pointer is not aligned or SPH_UPTR is undefined. */
1542 static SPH_INLINE sph_u32
1543 sph_dec32le(const void *src)
1544 {
1545 #if defined SPH_UPTR
1546 #if SPH_UNALIGNED
1547 #if SPH_BIG_ENDIAN
1548  return sph_bswap32(*(const sph_u32 *)src);
1549 #else
1550  return *(const sph_u32 *)src;
1551 #endif
1552 #else
1553  if (((SPH_UPTR)src & 3) == 0) {
1554 #if SPH_BIG_ENDIAN
1555 #if SPH_SPARCV9_GCC && !SPH_NO_ASM
1556  sph_u32 tmp;
1557 
1558  /*
1559  * "__volatile__" is needed here because without it,
1560  * gcc-3.4.3 miscompiles the code and performs the
1561  * access before the test on the address, thus triggering
1562  * a bus error...
1563  */
1564  __asm__ __volatile__ (
1565  "lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
1566  return tmp;
1567 /*
1568  * On PowerPC, this turns out not to be worth the effort: the inline
1569  * assembly makes GCC optimizer uncomfortable, which tends to nullify
1570  * the decoding gains.
1571  *
1572  * For most hash functions, using this inline assembly trick changes
1573  * hashing speed by less than 5% and often _reduces_ it. The biggest
1574  * gains are for MD4 (+11%) and CubeHash (+30%). For all others, it is
1575  * less then 10%. The speed gain on CubeHash is probably due to the
1576  * chronic shortage of registers that CubeHash endures; for the other
1577  * functions, the generic code appears to be efficient enough already.
1578  *
1579 #elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
1580  sph_u32 tmp;
1581 
1582  __asm__ __volatile__ (
1583  "lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
1584  return tmp;
1585  */
1586 #else
1587  return sph_bswap32(*(const sph_u32 *)src);
1588 #endif
1589 #else
1590  return *(const sph_u32 *)src;
1591 #endif
1592  } else {
1593  return (sph_u32)(((const unsigned char *)src)[0])
1594  | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
1595  | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
1596  | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
1597  }
1598 #endif
1599 #else
1600  return (sph_u32)(((const unsigned char *)src)[0])
1601  | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
1602  | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
1603  | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
1604 #endif
1605 }
1606 
/* Decode a 32-bit little-endian value from a 32-bit-aligned buffer.
   Uses the UltraSPARC little-endian address-space load where available;
   otherwise a direct load (little-endian), a load plus byte swap
   (big-endian), or byte-wise assembly (unknown endianness). */
1614 static SPH_INLINE sph_u32
1615 sph_dec32le_aligned(const void *src)
1616 {
1617 #if SPH_LITTLE_ENDIAN
1618  return *(const sph_u32 *)src;
1619 #elif SPH_BIG_ENDIAN
1620 #if SPH_SPARCV9_GCC && !SPH_NO_ASM
1621  sph_u32 tmp;
1622 
1623  __asm__ __volatile__ ("lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
1624  return tmp;
1625 /*
1626  * Not worth it generally.
1627  *
1628 #elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
1629  sph_u32 tmp;
1630 
1631  __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
1632  return tmp;
1633  */
1634 #else
1635  return sph_bswap32(*(const sph_u32 *)src);
1636 #endif
1637 #else
1638  return (sph_u32)(((const unsigned char *)src)[0])
1639  | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
1640  | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
1641  | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
1642 #endif
1643 }
1644 
1645 #if SPH_64
1646 
/* Encode a 64-bit value into dst, big-endian, with no alignment
   requirement. Whole-word store (byte-swapped on little-endian hosts)
   when SPH_UPTR is defined and the pointer is 8-byte aligned or
   unaligned access is allowed; byte-wise stores otherwise. */
1653 static SPH_INLINE void
1654 sph_enc64be(void *dst, sph_u64 val)
1655 {
1656 #if defined SPH_UPTR
1657 #if SPH_UNALIGNED
1658 #if SPH_LITTLE_ENDIAN
1659  val = sph_bswap64(val);
1660 #endif
1661  *(sph_u64 *)dst = val;
1662 #else
1663  if (((SPH_UPTR)dst & 7) == 0) {
1664 #if SPH_LITTLE_ENDIAN
1665  val = sph_bswap64(val);
1666 #endif
1667  *(sph_u64 *)dst = val;
1668  } else {
1669  ((unsigned char *)dst)[0] = (val >> 56);
1670  ((unsigned char *)dst)[1] = (val >> 48);
1671  ((unsigned char *)dst)[2] = (val >> 40);
1672  ((unsigned char *)dst)[3] = (val >> 32);
1673  ((unsigned char *)dst)[4] = (val >> 24);
1674  ((unsigned char *)dst)[5] = (val >> 16);
1675  ((unsigned char *)dst)[6] = (val >> 8);
1676  ((unsigned char *)dst)[7] = val;
1677  }
1678 #endif
1679 #else
1680  ((unsigned char *)dst)[0] = (val >> 56);
1681  ((unsigned char *)dst)[1] = (val >> 48);
1682  ((unsigned char *)dst)[2] = (val >> 40);
1683  ((unsigned char *)dst)[3] = (val >> 32);
1684  ((unsigned char *)dst)[4] = (val >> 24);
1685  ((unsigned char *)dst)[5] = (val >> 16);
1686  ((unsigned char *)dst)[6] = (val >> 8);
1687  ((unsigned char *)dst)[7] = val;
1688 #endif
1689 }
1690 
/* Encode a 64-bit value into a 64-bit-aligned buffer, big-endian order. */
1698 static SPH_INLINE void
1699 sph_enc64be_aligned(void *dst, sph_u64 val)
1700 {
1701 #if SPH_LITTLE_ENDIAN
1702  *(sph_u64 *)dst = sph_bswap64(val);
1703 #elif SPH_BIG_ENDIAN
1704  *(sph_u64 *)dst = val;
1705 #else
1706  ((unsigned char *)dst)[0] = (val >> 56);
1707  ((unsigned char *)dst)[1] = (val >> 48);
1708  ((unsigned char *)dst)[2] = (val >> 40);
1709  ((unsigned char *)dst)[3] = (val >> 32);
1710  ((unsigned char *)dst)[4] = (val >> 24);
1711  ((unsigned char *)dst)[5] = (val >> 16);
1712  ((unsigned char *)dst)[6] = (val >> 8);
1713  ((unsigned char *)dst)[7] = val;
1714 #endif
1715 }
1716 
/* Decode a 64-bit big-endian value from src, with no alignment
   requirement. Mirrors sph_enc64be. */
1723 static SPH_INLINE sph_u64
1724 sph_dec64be(const void *src)
1725 {
1726 #if defined SPH_UPTR
1727 #if SPH_UNALIGNED
1728 #if SPH_LITTLE_ENDIAN
1729  return sph_bswap64(*(const sph_u64 *)src);
1730 #else
1731  return *(const sph_u64 *)src;
1732 #endif
1733 #else
1734  if (((SPH_UPTR)src & 7) == 0) {
1735 #if SPH_LITTLE_ENDIAN
1736  return sph_bswap64(*(const sph_u64 *)src);
1737 #else
1738  return *(const sph_u64 *)src;
1739 #endif
1740  } else {
1741  return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
1742  | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
1743  | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
1744  | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
1745  | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
1746  | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
1747  | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
1748  | (sph_u64)(((const unsigned char *)src)[7]);
1749  }
1750 #endif
1751 #else
1752  return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
1753  | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
1754  | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
1755  | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
1756  | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
1757  | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
1758  | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
1759  | (sph_u64)(((const unsigned char *)src)[7]);
1760 #endif
1761 }
1762 
/* Decode a 64-bit big-endian value from a 64-bit-aligned buffer. */
1770 static SPH_INLINE sph_u64
1771 sph_dec64be_aligned(const void *src)
1772 {
1773 #if SPH_LITTLE_ENDIAN
1774  return sph_bswap64(*(const sph_u64 *)src);
1775 #elif SPH_BIG_ENDIAN
1776  return *(const sph_u64 *)src;
1777 #else
1778  return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
1779  | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
1780  | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
1781  | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
1782  | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
1783  | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
1784  | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
1785  | (sph_u64)(((const unsigned char *)src)[7]);
1786 #endif
1787 }
1788 
/* Encode a 64-bit value into dst, little-endian, with no alignment
   requirement. Whole-word store (byte-swapped on big-endian hosts) when
   SPH_UPTR is defined and the access is safe; byte-wise stores otherwise. */
1795 static SPH_INLINE void
1796 sph_enc64le(void *dst, sph_u64 val)
1797 {
1798 #if defined SPH_UPTR
1799 #if SPH_UNALIGNED
1800 #if SPH_BIG_ENDIAN
1801  val = sph_bswap64(val);
1802 #endif
1803  *(sph_u64 *)dst = val;
1804 #else
1805  if (((SPH_UPTR)dst & 7) == 0) {
1806 #if SPH_BIG_ENDIAN
1807  val = sph_bswap64(val);
1808 #endif
1809  *(sph_u64 *)dst = val;
1810  } else {
1811  ((unsigned char *)dst)[0] = val;
1812  ((unsigned char *)dst)[1] = (val >> 8);
1813  ((unsigned char *)dst)[2] = (val >> 16);
1814  ((unsigned char *)dst)[3] = (val >> 24);
1815  ((unsigned char *)dst)[4] = (val >> 32);
1816  ((unsigned char *)dst)[5] = (val >> 40);
1817  ((unsigned char *)dst)[6] = (val >> 48);
1818  ((unsigned char *)dst)[7] = (val >> 56);
1819  }
1820 #endif
1821 #else
1822  ((unsigned char *)dst)[0] = val;
1823  ((unsigned char *)dst)[1] = (val >> 8);
1824  ((unsigned char *)dst)[2] = (val >> 16);
1825  ((unsigned char *)dst)[3] = (val >> 24);
1826  ((unsigned char *)dst)[4] = (val >> 32);
1827  ((unsigned char *)dst)[5] = (val >> 40);
1828  ((unsigned char *)dst)[6] = (val >> 48);
1829  ((unsigned char *)dst)[7] = (val >> 56);
1830 #endif
1831 }
1832 
/* Encode a 64-bit value into a 64-bit-aligned buffer, little-endian order. */
1840 static SPH_INLINE void
1841 sph_enc64le_aligned(void *dst, sph_u64 val)
1842 {
1843 #if SPH_LITTLE_ENDIAN
1844  *(sph_u64 *)dst = val;
1845 #elif SPH_BIG_ENDIAN
1846  *(sph_u64 *)dst = sph_bswap64(val);
1847 #else
1848  ((unsigned char *)dst)[0] = val;
1849  ((unsigned char *)dst)[1] = (val >> 8);
1850  ((unsigned char *)dst)[2] = (val >> 16);
1851  ((unsigned char *)dst)[3] = (val >> 24);
1852  ((unsigned char *)dst)[4] = (val >> 32);
1853  ((unsigned char *)dst)[5] = (val >> 40);
1854  ((unsigned char *)dst)[6] = (val >> 48);
1855  ((unsigned char *)dst)[7] = (val >> 56);
1856 #endif
1857 }
1858 
/* Decode a 64-bit little-endian value from src, with no alignment
   requirement. On aligned big-endian UltraSPARC (64-bit, gcc) a single
   "ldxa" access in the little-endian address space (0x88) avoids the
   swap; otherwise a word load plus sph_bswap64, or byte-wise assembly
   when the pointer is not aligned or SPH_UPTR is undefined. */
1865 static SPH_INLINE sph_u64
1866 sph_dec64le(const void *src)
1867 {
1868 #if defined SPH_UPTR
1869 #if SPH_UNALIGNED
1870 #if SPH_BIG_ENDIAN
1871  return sph_bswap64(*(const sph_u64 *)src);
1872 #else
1873  return *(const sph_u64 *)src;
1874 #endif
1875 #else
1876  if (((SPH_UPTR)src & 7) == 0) {
1877 #if SPH_BIG_ENDIAN
1878 #if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
1879  sph_u64 tmp;
1880 
1881  __asm__ __volatile__ (
1882  "ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
1883  return tmp;
1884 /*
1885  * Not worth it generally.
1886  *
1887 #elif SPH_PPC32_GCC && !SPH_NO_ASM
1888  return (sph_u64)sph_dec32le_aligned(src)
1889  | ((sph_u64)sph_dec32le_aligned(
1890  (const char *)src + 4) << 32);
1891 #elif SPH_PPC64_GCC && !SPH_NO_ASM
1892  sph_u64 tmp;
1893 
1894  __asm__ __volatile__ (
1895  "ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
1896  return tmp;
1897  */
1898 #else
1899  return sph_bswap64(*(const sph_u64 *)src);
1900 #endif
1901 #else
1902  return *(const sph_u64 *)src;
1903 #endif
1904  } else {
1905  return (sph_u64)(((const unsigned char *)src)[0])
1906  | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
1907  | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
1908  | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
1909  | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
1910  | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
1911  | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
1912  | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
1913  }
1914 #endif
1915 #else
1916  return (sph_u64)(((const unsigned char *)src)[0])
1917  | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
1918  | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
1919  | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
1920  | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
1921  | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
1922  | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
1923  | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
1924 #endif
1925 }
1926 
/* Decode a 64-bit little-endian value from a 64-bit-aligned buffer.
   Uses the UltraSPARC little-endian address-space "ldxa" where available;
   otherwise a direct load (little-endian), a load plus byte swap
   (big-endian), or byte-wise assembly (unknown endianness). */
1934 static SPH_INLINE sph_u64
1935 sph_dec64le_aligned(const void *src)
1936 {
1937 #if SPH_LITTLE_ENDIAN
1938  return *(const sph_u64 *)src;
1939 #elif SPH_BIG_ENDIAN
1940 #if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
1941  sph_u64 tmp;
1942 
1943  __asm__ __volatile__ ("ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
1944  return tmp;
1945 /*
1946  * Not worth it generally.
1947  *
1948 #elif SPH_PPC32_GCC && !SPH_NO_ASM
1949  return (sph_u64)sph_dec32le_aligned(src)
1950  | ((sph_u64)sph_dec32le_aligned((const char *)src + 4) << 32);
1951 #elif SPH_PPC64_GCC && !SPH_NO_ASM
1952  sph_u64 tmp;
1953 
1954  __asm__ __volatile__ ("ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
1955  return tmp;
1956  */
1957 #else
1958  return sph_bswap64(*(const sph_u64 *)src);
1959 #endif
1960 #else
1961  return (sph_u64)(((const unsigned char *)src)[0])
1962  | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
1963  | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
1964  | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
1965  | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
1966  | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
1967  | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
1968  | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
1969 #endif
1970 }
1971 
1972 #endif
1973 
1974 #endif /* Doxygen excluded block */
1975 
1976 #endif
static SPH_INLINE unsigned sph_dec16le(const void *src)
Definition: sph_types.h:1354
long sph_s32
Definition: sph_types.h:871
static SPH_INLINE void sph_enc16le(void *dst, unsigned val)
Definition: sph_types.h:1347
static SPH_INLINE void sph_enc32le_aligned(void *dst, sph_u32 val)
Encode a 32-bit value into the provided buffer (little endian convention).
Definition: sph_types.h:1522
static SPH_INLINE void sph_enc16be(void *dst, unsigned val)
Definition: sph_types.h:1333
static SPH_INLINE sph_u32 sph_dec32le_aligned(const void *src)
Decode a 32-bit value from the provided buffer (little endian convention).
Definition: sph_types.h:1615
#define SPH_T32(x)
Definition: sph_types.h:932
#define SPH_C32(x)
Definition: sph_types.h:873
static SPH_INLINE sph_u32 sph_dec32be_aligned(const void *src)
Decode a 32-bit value from the provided buffer (big endian convention).
Definition: sph_types.h:1464
#define SPH_INLINE
Definition: sph_types.h:954
static SPH_INLINE sph_u32 sph_dec32be(const void *src)
Decode a 32-bit value from the provided buffer (big endian convention).
Definition: sph_types.h:1425
static SPH_INLINE unsigned sph_dec16be(const void *src)
Definition: sph_types.h:1340
static SPH_INLINE void sph_enc32le(void *dst, sph_u32 val)
Encode a 32-bit value into the provided buffer (little endian convention).
Definition: sph_types.h:1485
static SPH_INLINE sph_u32 sph_dec32le(const void *src)
Decode a 32-bit value from the provided buffer (little endian convention).
Definition: sph_types.h:1543
static SPH_INLINE void sph_enc32be_aligned(void *dst, sph_u32 val)
Encode a 32-bit value into the provided buffer (big endian convention).
Definition: sph_types.h:1404
static SPH_INLINE void sph_enc32be(void *dst, sph_u32 val)
Encode a 32-bit value into the provided buffer (big endian convention).
Definition: sph_types.h:1367
unsigned long sph_u32
Definition: sph_types.h:870
static SPH_INLINE sph_u32 sph_bswap32(sph_u32 x)
Definition: sph_types.h:1266
Released under the MIT license