@@ -45,7 +45,7 @@ void COLL_ProjectPointToEdge(SVec3* out, const SVec3* v1, const SVec3* v2, const
 }
 
 /* Address: 0x8001f2dc */
-void COLL_CalculateTrianglePlane(const CollDCache* cache, CollVertex* v1, const CollVertex* v2, const CollVertex* v3)
+static void COLL_CalculateTrianglePlane(const CollDCache* cache, CollVertex* v1, const CollVertex* v2, const CollVertex* v3)
 {
 #ifdef TEST_COLL_IMPL
     CollVertex input = *v1;
@@ -70,14 +70,10 @@ void COLL_CalculateTrianglePlane(const CollDCache* cache, CollVertex* v1, const
     else if (magnitude == absNormal.y) { v1->normalDominantAxis = AXIS_Y; }
     else { v1->normalDominantAxis = AXIS_Z; }
     TEST_COLL_CalculateTrianglePlane(cache, &input, v2, v3, v1);
-    /* This is a hand written assembly function that breaks the ABI,
-       and some callers expect the argument registers to be untouched */
-    __asm__ volatile("move $a0, %0" : : "r"((u32)cache));
-    __asm__ volatile("move $t9, %0" : : "r"((u32)cache->currQuadblock));
 }
 
 /* Address: 0x8001f7f0 */
-void COLL_LoadVerticeData(CollDCache* cache)
+static void COLL_LoadVerticeData(CollDCache* cache)
 {
     const Quadblock* quadblock = cache->currQuadblock;
     const Vertex* vertices = cache->meshInfo->vertices;
@@ -91,6 +87,52 @@ void COLL_LoadVerticeData(CollDCache* cache)
     cache->quadblockThirdIndex = quadblock->index[2];
     cache->quadblockFourthIndex = quadblock->index[3];
     TEST_COLL_LoadVerticeData(cache);
+}
+
+/* Address: 0x8001f67c */
+void COLL_LoadQuadblockData_LowLOD(CollDCache* cache, Quadblock* quadblock)
+{
+    COLL_LoadVerticeData(cache);
+    cache->lodShift = 2;
+    cache->normalBitshift = quadblock->triNormalVecBitshift;
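+    /* Assumption: when the third and fourth vertex indices differ the
+       quadblock is a full quad rather than a triangle, so the extra plane
+       below is calculated for its second half. */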
+    if (cache->quadblockThirdIndex != cache->quadblockFourthIndex)
+    {
+        cache->normalScale = quadblock->triNormalVecDividend[9];
+        COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[1], &cache->quadblockCollVertices[3], &cache->quadblockCollVertices[2]);
+    }
+    cache->normalScale = quadblock->triNormalVecDividend[8];
+    COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[0], &cache->quadblockCollVertices[1], &cache->quadblockCollVertices[2]);
+    /* This is a hand written assembly function that breaks the ABI,
+       and some callers expect the argument registers to be untouched */
+    __asm__ volatile("move $a0, %0" : : "r"((u32)cache));
+    __asm__ volatile("move $t9, %0" : : "r"((u32)quadblock));
+}
+
+/* Address: 0x8001f6f0 */
+void COLL_LoadQuadblockData_HighLOD(CollDCache* cache, Quadblock* quadblock)
+{
+    COLL_LoadVerticeData(cache);
+    cache->lodShift = 0;
+    cache->normalBitshift = quadblock->triNormalVecBitshift;
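+    /* Same quad-vs-triangle index check as the low-LOD path (assumption);
+       here four extra planes are calculated for the second half of the
+       subdivided quadblock. */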
+    if (cache->quadblockThirdIndex != cache->quadblockFourthIndex)
+    {
+        cache->normalScale = quadblock->triNormalVecDividend[4];
+        COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[8], &cache->quadblockCollVertices[6], &cache->quadblockCollVertices[7]);
+        cache->normalScale = quadblock->triNormalVecDividend[5];
+        COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[7], &cache->quadblockCollVertices[3], &cache->quadblockCollVertices[8]);
+        cache->normalScale = quadblock->triNormalVecDividend[6];
+        COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[1], &cache->quadblockCollVertices[7], &cache->quadblockCollVertices[6]);
+        cache->normalScale = quadblock->triNormalVecDividend[7];
+        COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[2], &cache->quadblockCollVertices[6], &cache->quadblockCollVertices[8]);
+    }
+    cache->normalScale = quadblock->triNormalVecDividend[0];
+    COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[0], &cache->quadblockCollVertices[4], &cache->quadblockCollVertices[5]);
+    cache->normalScale = quadblock->triNormalVecDividend[1];
+    COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[4], &cache->quadblockCollVertices[6], &cache->quadblockCollVertices[5]);
+    cache->normalScale = quadblock->triNormalVecDividend[2];
+    COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[6], &cache->quadblockCollVertices[4], &cache->quadblockCollVertices[1]);
+    cache->normalScale = quadblock->triNormalVecDividend[3];
+    COLL_CalculateTrianglePlane(cache, &cache->quadblockCollVertices[5], &cache->quadblockCollVertices[6], &cache->quadblockCollVertices[2]);
     /* This is a hand written assembly function that breaks the ABI,
        and some callers expect the argument registers to be untouched */
     __asm__ volatile("move $a0, %0" : : "r"((u32)cache));