@@ -875,10 +875,25 @@ static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
 }
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
-#define cond_le32_to_cpu(v) \
-	(ohci->quirks & QUIRK_BE_HEADERS ? be32_to_cpu(v) : le32_to_cpu(v))
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk)
+{
+	return has_be_header_quirk ? be32_to_cpu(value) : le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+	return !!(ohci->quirks & QUIRK_BE_HEADERS);
+}
 #else
-#define cond_le32_to_cpu(v) le32_to_cpu(v)
+static u32 cond_le32_to_cpu(__le32 value, bool has_be_header_quirk __maybe_unused)
+{
+	return le32_to_cpu(value);
+}
+
+static bool has_be_header_quirk(const struct fw_ohci *ohci)
+{
+	return false;
+}
 #endif
 
 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
@@ -888,9 +903,9 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	u32 status, length, tcode;
 	int evt;
 
-	p.header[0] = cond_le32_to_cpu(buffer[0]);
-	p.header[1] = cond_le32_to_cpu(buffer[1]);
-	p.header[2] = cond_le32_to_cpu(buffer[2]);
+	p.header[0] = cond_le32_to_cpu(buffer[0], has_be_header_quirk(ohci));
+	p.header[1] = cond_le32_to_cpu(buffer[1], has_be_header_quirk(ohci));
+	p.header[2] = cond_le32_to_cpu(buffer[2], has_be_header_quirk(ohci));
 
 	tcode = async_header_get_tcode(p.header);
 	switch (tcode) {
@@ -902,7 +917,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 		break;
 
 	case TCODE_READ_BLOCK_REQUEST:
-		p.header[3] = cond_le32_to_cpu(buffer[3]);
+		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
 		p.header_length = 16;
 		p.payload_length = 0;
 		break;
@@ -911,7 +926,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	case TCODE_READ_BLOCK_RESPONSE:
 	case TCODE_LOCK_REQUEST:
 	case TCODE_LOCK_RESPONSE:
-		p.header[3] = cond_le32_to_cpu(buffer[3]);
+		p.header[3] = cond_le32_to_cpu(buffer[3], has_be_header_quirk(ohci));
 		p.header_length = 16;
 		p.payload_length = async_header_get_data_length(p.header);
 		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
@@ -936,7 +951,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 
 	/* FIXME: What to do about evt_* errors? */
 	length = (p.header_length + p.payload_length + 3) / 4;
-	status = cond_le32_to_cpu(buffer[length]);
+	status = cond_le32_to_cpu(buffer[length], has_be_header_quirk(ohci));
 	evt    = (status >> 16) & 0x1f;
 
 	p.ack        = evt - 16;
@@ -2030,12 +2045,12 @@ static void bus_reset_work(struct work_struct *work)
 		return;
 	}
 
-	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
+	generation = (cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci)) >> 16) & 0xff;
 	rmb();
 
 	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
-		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
-		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
+		u32 id  = cond_le32_to_cpu(ohci->self_id[i], has_be_header_quirk(ohci));
+		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1], has_be_header_quirk(ohci));
 
 		if (id != ~id2) {
 			/*
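
For readers outside the kernel tree, a minimal standalone sketch of the quirk-gated conversion pattern this diff introduces is shown below. It assumes a little-endian host and uses a hypothetical struct and quirk bit purely for illustration; in the driver itself, be32_to_cpu()/le32_to_cpu() and struct fw_ohci take the place of the stand-ins here.

/* Standalone sketch, illustration only; little-endian host assumed. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUIRK_BE_HEADERS 0x10		/* hypothetical bit value for this sketch */

struct fake_ohci {			/* stand-in for struct fw_ohci */
	unsigned int quirks;
};

static bool has_be_header_quirk(const struct fake_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_BE_HEADERS);
}

static uint32_t cond_le32_to_cpu(uint32_t value, bool be_headers)
{
	/*
	 * On a little-endian host, be32_to_cpu() is a byte swap and
	 * le32_to_cpu() is a no-op; __builtin_bswap32() stands in here.
	 */
	return be_headers ? __builtin_bswap32(value) : value;
}

int main(void)
{
	struct fake_ohci ohci = { .quirks = QUIRK_BE_HEADERS };

	/* A quadlet stored big-endian reads back as 12345678 once swapped. */
	printf("%08x\n", cond_le32_to_cpu(0x78563412u, has_be_header_quirk(&ohci)));
	return 0;
}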