@@ -257,7 +257,7 @@ if (!is(T == enum) && __traits(isStaticArray, T) && canBitwiseHash!T)
257257 // else static if (T.length == 1)
258258 // return hashOf(val[0], seed);
259259 // else
260- // /+ hash like a dynamic array +/
260+ // return bytesHashWithExactSizeAndAlignment!T(toUbyte(val), seed);
261261 //
262262 // ... but that's inefficient when using a runtime TypeInfo (introduces a branch)
263263 // and PR #2243 wants typeid(T).getHash(&val) to produce the same result as
@@ -412,7 +412,7 @@ size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && __tra
412412 else static if (T.mant_dig == double .mant_dig && T.sizeof == ulong .sizeof)
413413 return hashOf (* cast (const ulong * ) &data, seed);
414414 else
415- return bytesHashAlignedBy ! T(toUbyte(data), seed);
415+ return bytesHashWithExactSizeAndAlignment ! T(toUbyte(data), seed);
416416 }
417417 else
418418 {
534534 }
535535 else static if ((is (T == struct ) && ! canBitwiseHash! T) || T.tupleof.length == 1 )
536536 {
537+ static if (isChained) size_t h = seed;
537538 static foreach (i, F; typeof (val.tupleof))
538539 {
539- static if (i != 0 )
540- h = hashOf(val.tupleof[i], h);
541- else static if (isChained)
542- size_t h = hashOf(val.tupleof[i], seed);
540+ static if (__traits(isStaticArray, F))
541+ {
542+ static if (i == 0 && ! isChained) size_t h = 0 ;
543+ static if (F.sizeof > 0 && canBitwiseHash! F)
544+ // May use smallBytesHash instead of bytesHash.
545+ h = bytesHashWithExactSizeAndAlignment! F(toUbyte(val.tupleof[i]), h);
546+ else
547+ // We can avoid the "double hashing" the top-level version uses
548+ // for consistency with TypeInfo.getHash.
549+ foreach (ref e; val.tupleof[i])
550+ h = hashOf(e, h);
551+ }
552+ else static if (is (F == struct ) || is (F == union ))
553+ {
554+ static if (hasCallableToHash! F)
555+ {
556+ static if (i == 0 && ! isChained)
557+ size_t h = val.tupleof[i].toHash();
558+ else
559+ h = hashOf(cast (size_t ) val.tupleof[i].toHash(), h);
560+ }
561+ else static if (F.tupleof.length == 1 )
562+ {
563+ // Handle the single member case separately to avoid unnecessarily using bytesHash.
564+ static if (i == 0 && ! isChained)
565+ size_t h = hashOf(val.tupleof[i].tupleof[0 ]);
566+ else
567+ h = hashOf(val.tupleof[i].tupleof[0 ], h);
568+ }
569+ else static if (canBitwiseHash! F)
570+ {
571+ // May use smallBytesHash instead of bytesHash.
572+ static if (i == 0 && ! isChained) size_t h = 0 ;
573+ h = bytesHashWithExactSizeAndAlignment! F(toUbyte(val.tupleof[i]), h);
574+ }
575+ else
576+ {
577+ // Nothing special happening.
578+ static if (i == 0 && ! isChained)
579+ size_t h = hashOf(val.tupleof[i]);
580+ else
581+ h = hashOf(val.tupleof[i], h);
582+ }
583+ }
543584 else
544- size_t h = hashOf(val.tupleof[i]);
585+ {
586+ // Nothing special happening.
587+ static if (i == 0 && ! isChained)
588+ size_t h = hashOf(val.tupleof[i]);
589+ else
590+ h = hashOf(val.tupleof[i], h);
591+ }
545592 }
546593 return h;
547594 }
548595 else static if (is (typeof (toUbyte(val)) == const (ubyte )[]))// CTFE ready for structs without reference fields
549596 {
597+ // Not using bytesHashWithExactSizeAndAlignment here because
598+ // the result may differ from typeid(T).hashOf(&val).
550599 return bytesHashAlignedBy! T(toUbyte(val), seed);
551600 }
552601 else // CTFE unsupported
553602 {
554- assert (! __ctfe, " unable to compute hash of " ~ T.stringof);
603+ assert (! __ctfe, " unable to compute hash of " ~ T.stringof~ " at compile time " );
555604 const (ubyte )[] bytes = (() @trusted => (cast (const (ubyte )* )&val)[0 .. T.sizeof])();
605+ // Not using bytesHashWithExactSizeAndAlignment here because
606+ // the result may differ from typeid(T).hashOf(&val).
556607 return bytesHashAlignedBy! T(bytes, seed);
557608 }
558609 }
@@ -596,9 +647,9 @@ if (!is(T == enum) && (is(T == struct) || is(T == union))
/**
 * Hashes a delegate by its raw bytes (context pointer + function pointer).
 *
 * Not usable in CTFE: reinterpreting the delegate as a byte slice requires
 * a pointer cast, which CTFE forbids — hence the `!__ctfe` assert.
 *
 * Params:
 *   val  = delegate to hash
 *   seed = chaining seed folded into the result (defaults to 0)
 * Returns: size_t hash of the delegate's in-memory representation.
 */
@trusted @nogc nothrow pure
size_t hashOf(T)(scope const T val, size_t seed = 0) if (!is(T == enum) && is(T == delegate))
{
    assert(!__ctfe, "unable to compute hash of " ~ T.stringof ~ " at compile time");
    // T.sizeof and T.alignof are statically known here, so the
    // exact-size/alignment dispatcher can pick the small-buffer hash
    // at compile time instead of the generic block hash.
    const(ubyte)[] raw = (cast(const(ubyte)*) &val)[0 .. T.sizeof];
    return bytesHashWithExactSizeAndAlignment!T(raw, seed);
}
603654
604655// address-based class hash. CTFE only if null.
@@ -685,6 +736,31 @@ private template bytesHashAlignedBy(AlignType)
685736 alias bytesHashAlignedBy = bytesHash! (AlignType.alignof >= uint .alignof);
686737}
687738
/*
 * Selects a byte hasher for a payload whose exact size and alignment are
 * known at compile time. Small payloads go to smallBytesHash (FNV), which
 * beats the block-based bytesHash on short inputs; larger ones fall back to
 * the alignment-specialized bytesHash. The cutoff differs with alignment
 * because the uint-aligned variant reads a word at a time.
 * NOTE(review): the 12/10-byte thresholds are presumably benchmark-tuned —
 * confirm before changing them.
 */
private template bytesHashWithExactSizeAndAlignment(SizeAndAlignType)
{
    static if (SizeAndAlignType.alignof < uint.alignof
        ? SizeAndAlignType.sizeof <= 12
        : SizeAndAlignType.sizeof <= 10)
    {
        alias bytesHashWithExactSizeAndAlignment = smallBytesHash;
    }
    else
    {
        alias bytesHashWithExactSizeAndAlignment = bytesHashAlignedBy!SizeAndAlignType;
    }
}
748+
/*
 * Fowler/Noll/Vo hash. http://www.isthe.com/chongo/tech/comp/fnv/
 *
 * FNV-1a variant: each byte is XORed into the state first, then the state
 * is multiplied by the FNV prime for the native word width. The caller's
 * `seed` stands in for the standard FNV offset basis, which keeps this
 * chainable with the other hashOf overloads.
 */
private size_t fnv()(scope const(ubyte)[] bytes, size_t seed) @nogc nothrow pure @safe
{
    // FNV prime matching the width of size_t.
    static if (size_t.max <= uint.max)
        enum size_t prime = 16_777_619;        // 2^24 + 2^8 + 0x93
    else static if (size_t.max <= ulong.max)
        enum size_t prime = 1_099_511_628_211; // 2^40 + 2^8 + 0xb3
    else
        enum prime = (size_t(1) << 88) + (size_t(1) << 8) + size_t(0x3b); // 128-bit
    size_t state = seed;
    foreach (b; bytes)
        state = (state ^ b) * prime;
    return state;
}
// Name used by the dispatchers above; "small" because FNV wins on short inputs.
private alias smallBytesHash = fnv;
763+
688764// -----------------------------------------------------------------------------
689765// Block read - if your platform needs to do endian-swapping or can only
690766// handle aligned reads, do the conversion here
0 commit comments