
Commit 802b123

peff authored and gitster committed
block-sha1: factor out get_be and put_be wrappers
The BLK_SHA1 code has optimized wrappers for doing endian conversions on memory that may not be aligned. Let's pull them out so that we can use them elsewhere, especially the time-tested list of platforms that prefer each strategy.

Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
1 parent 1a6d8b9 commit 802b123
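
To make the semantics concrete, here is a minimal standalone sketch of how callers use these wrappers. It reproduces the portable byte-load/byte-store fallback from the diff below so it compiles on its own; the main() driver and the misaligned offset are illustrative, not part of the commit.

    #include <stdio.h>

    /* Portable fallback definitions, copied from the diff below. */
    #define get_be32(p) ( \
            (*((unsigned char *)(p) + 0) << 24) | \
            (*((unsigned char *)(p) + 1) << 16) | \
            (*((unsigned char *)(p) + 2) <<  8) | \
            (*((unsigned char *)(p) + 3) <<  0) )
    #define put_be32(p, v) do { \
            unsigned int __v = (v); \
            *((unsigned char *)(p) + 0) = __v >> 24; \
            *((unsigned char *)(p) + 1) = __v >> 16; \
            *((unsigned char *)(p) + 2) = __v >>  8; \
            *((unsigned char *)(p) + 3) = __v >>  0; } while (0)

    int main(void)
    {
            unsigned char buf[8] = { 0 };

            /* Writing at offset 1 shows the byte-wise strategy
             * tolerates pointers that are not 4-byte aligned. */
            put_be32(buf + 1, 0x12345678U);
            printf("%08x\n", (unsigned int)get_be32(buf + 1)); /* 12345678 */
            return 0;
    }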

File tree

2 files changed: +32 −32 lines changed


block-sha1/sha1.c

Lines changed: 0 additions & 32 deletions
@@ -62,38 +62,6 @@
 #define setW(x, val) (W(x) = (val))
 #endif
 
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if defined(__i386__) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_X64) || \
-    defined(__ppc__) || defined(__ppc64__) || \
-    defined(__powerpc__) || defined(__powerpc64__) || \
-    defined(__s390__) || defined(__s390x__)
-
-#define get_be32(p)    ntohl(*(unsigned int *)(p))
-#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
-
-#else
-
-#define get_be32(p)    ( \
-        (*((unsigned char *)(p) + 0) << 24) | \
-        (*((unsigned char *)(p) + 1) << 16) | \
-        (*((unsigned char *)(p) + 2) <<  8) | \
-        (*((unsigned char *)(p) + 3) <<  0) )
-#define put_be32(p, v) do { \
-        unsigned int __v = (v); \
-        *((unsigned char *)(p) + 0) = __v >> 24; \
-        *((unsigned char *)(p) + 1) = __v >> 16; \
-        *((unsigned char *)(p) + 2) = __v >>  8; \
-        *((unsigned char *)(p) + 3) = __v >>  0; } while (0)
-
-#endif
-
 /* This "rolls" over the 512-bit array */
 #define W(x) (array[(x)&15])
 

compat/bswap.h

Lines changed: 32 additions & 0 deletions
@@ -122,3 +122,35 @@ static inline uint64_t git_bswap64(uint64_t x)
 #endif
 
 #endif
+
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts which is portable,
+ * and is faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+    defined(_M_IX86) || defined(_M_X64) || \
+    defined(__ppc__) || defined(__ppc64__) || \
+    defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__s390__) || defined(__s390x__)
+
+#define get_be32(p)    ntohl(*(unsigned int *)(p))
+#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+#define get_be32(p)    ( \
+        (*((unsigned char *)(p) + 0) << 24) | \
+        (*((unsigned char *)(p) + 1) << 16) | \
+        (*((unsigned char *)(p) + 2) <<  8) | \
+        (*((unsigned char *)(p) + 3) <<  0) )
+#define put_be32(p, v) do { \
+        unsigned int __v = (v); \
+        *((unsigned char *)(p) + 0) = __v >> 24; \
+        *((unsigned char *)(p) + 1) = __v >> 16; \
+        *((unsigned char *)(p) + 2) = __v >>  8; \
+        *((unsigned char *)(p) + 3) = __v >>  0; } while (0)
+
+#endif
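
As a quick cross-check, the two strategies must agree wherever the fast path is enabled. The sketch below is not part of the commit; the get_be32_fast/get_be32_portable helpers are invented names for this test, and it assumes a platform from the #if list above (e.g. x86) where the direct-load cast is safe, mirroring the commit's own cast.

    #include <assert.h>
    #include <arpa/inet.h> /* ntohl(); a Windows build would use <winsock2.h> */

    /* Fast path: direct 32-bit load plus byte swap, valid where
     * unaligned loads are cheap (the platforms in the #if list). */
    static unsigned int get_be32_fast(const void *p)
    {
            return ntohl(*(const unsigned int *)p);
    }

    /* Portable path: assemble the value byte by byte. */
    static unsigned int get_be32_portable(const void *p)
    {
            const unsigned char *b = p;
            return ((unsigned int)b[0] << 24) | ((unsigned int)b[1] << 16) |
                   ((unsigned int)b[2] <<  8) |  (unsigned int)b[3];
    }

    int main(void)
    {
            unsigned char buf[4] = { 0xde, 0xad, 0xbe, 0xef };

            assert(get_be32_fast(buf) == 0xdeadbeefU);
            assert(get_be32_fast(buf) == get_be32_portable(buf));
            return 0;
    }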
