Commit 86cca37

Merge branch 'jk/drop-unaligned-loads'

Compilation fix around type punning.

* jk/drop-unaligned-loads:
  Revert "fast-export: use local array to store anonymized oid"
  bswap.h: drop unaligned loads

2 parents: 94de88c + 176380f
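For context on "type punning": a minimal standalone sketch of the two styles, assuming a POSIX ntohl(). Neither function below is git's code; they only contrast the cast-based loads this merge removes with the byte-wise loads it keeps.

#include <stdint.h>
#include <arpa/inet.h>

/*
 * Removed style: read a whole 32-bit word through a pointer cast.
 * When p points into an unsigned char buffer, this pun is undefined
 * behavior in C (alignment and strict aliasing), the kind of thing a
 * SANITIZE=undefined build trips over.
 */
static uint32_t get_be32_cast(const void *p)
{
	return ntohl(*(const uint32_t *)p);
}

/*
 * Kept style: assemble the value byte by byte. Well-defined at any
 * alignment; modern compilers fold it into a single load plus byte
 * swap where that is safe anyway.
 */
static uint32_t get_be32_bytes(const void *p)
{
	const unsigned char *b = p;
	return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
	       (uint32_t)b[2] << 8 | (uint32_t)b[3];
}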

3 files changed: 4 additions & 29 deletions
Makefile

Lines changed: 0 additions & 1 deletion

@@ -1232,7 +1232,6 @@ SANITIZERS := $(foreach flag,$(subst $(comma),$(space),$(SANITIZE)),$(flag))
 BASIC_CFLAGS += -fsanitize=$(SANITIZE) -fno-sanitize-recover=$(SANITIZE)
 BASIC_CFLAGS += -fno-omit-frame-pointer
 ifneq ($(filter undefined,$(SANITIZERS)),)
-BASIC_CFLAGS += -DNO_UNALIGNED_LOADS
 BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
 endif
 ifneq ($(filter leak,$(SANITIZERS)),)

builtin/fast-export.c

Lines changed: 4 additions & 4 deletions

@@ -405,12 +405,12 @@ static char *generate_fake_oid(void *data)
 {
 	static uint32_t counter = 1; /* avoid null oid */
 	const unsigned hashsz = the_hash_algo->rawsz;
-	unsigned char out[GIT_MAX_RAWSZ];
+	struct object_id oid;
 	char *hex = xmallocz(GIT_MAX_HEXSZ);
 
-	hashclr(out);
-	put_be32(out + hashsz - 4, counter++);
-	return hash_to_hex_algop_r(hex, out, the_hash_algo);
+	oidclr(&oid);
+	put_be32(oid.hash + hashsz - 4, counter++);
+	return oid_to_hex_r(hex, &oid);
 }
 
 static const char *anonymize_oid(const char *oid_hex)
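To see what the restored generate_fake_oid() computes, a standalone sketch with plain C stand-ins for git's helpers (RAWSZ replaces the_hash_algo->rawsz; memset() and the sprintf() loop stand in for oidclr() and oid_to_hex_r()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAWSZ 20 /* stand-in for the_hash_algo->rawsz (SHA-1) */

/* byte-wise big-endian store, same shape as bswap.h's put_be32() */
static void put_be32(unsigned char *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v >> 0;
}

int main(void)
{
	static uint32_t counter = 1; /* avoid null oid, as in the diff */
	unsigned char hash[RAWSZ];
	char hex[2 * RAWSZ + 1];
	int i;

	memset(hash, 0, sizeof(hash));         /* oidclr() equivalent */
	put_be32(hash + RAWSZ - 4, counter++); /* counter into the last 4 bytes */

	for (i = 0; i < RAWSZ; i++)
		sprintf(hex + 2 * i, "%02x", hash[i]);
	printf("%s\n", hex); /* 39 zeros followed by "1" */
	return 0;
}

With the cast-based macros gone from bswap.h, put_be32() is always plain byte stores, which is presumably why writing straight into oid.hash no longer needs the local-array workaround being reverted here.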

compat/bswap.h

Lines changed: 0 additions & 24 deletions

@@ -145,28 +145,6 @@ static inline uint64_t git_bswap64(uint64_t x)
 
 #endif
 
-/*
- * Performance might be improved if the CPU architecture is OK with
- * unaligned 32-bit loads and a fast ntohl() is available.
- * Otherwise fall back to byte loads and shifts which is portable,
- * and is faster on architectures with memory alignment issues.
- */
-
-#if !defined(NO_UNALIGNED_LOADS) && ( \
-    defined(__i386__) || defined(__x86_64__) || \
-    defined(_M_IX86) || defined(_M_X64) || \
-    defined(__ppc__) || defined(__ppc64__) || \
-    defined(__powerpc__) || defined(__powerpc64__) || \
-    defined(__s390__) || defined(__s390x__))
-
-#define get_be16(p)	ntohs(*(unsigned short *)(p))
-#define get_be32(p)	ntohl(*(unsigned int *)(p))
-#define get_be64(p)	ntohll(*(uint64_t *)(p))
-#define put_be32(p, v)	do { *(unsigned int *)(p) = htonl(v); } while (0)
-#define put_be64(p, v)	do { *(uint64_t *)(p) = htonll(v); } while (0)
-
-#else
-
 static inline uint16_t get_be16(const void *ptr)
 {
 	const unsigned char *p = ptr;
@@ -212,6 +190,4 @@ static inline void put_be64(void *ptr, uint64_t value)
 	p[7] = value >> 0;
 }
 
-#endif
-
 #endif /* COMPAT_BSWAP_H */
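A quick standalone check that the surviving byte-wise helper is safe at any alignment (the get_be32() below is a copy of the fallback kept by this commit, not an include of compat/bswap.h):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t get_be32(const void *ptr)
{
	const unsigned char *p = ptr;
	return (uint32_t)p[0] << 24 |
	       (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 |
	       (uint32_t)p[3] << 0;
}

int main(void)
{
	/* read at an odd offset: fine for byte loads, undefined
	 * behavior for the removed cast-based macros */
	unsigned char buf[] = { 0x00, 0x12, 0x34, 0x56, 0x78 };
	printf("%08" PRIx32 "\n", get_be32(buf + 1)); /* prints 12345678 */
	return 0;
}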
