@@ -173,18 +173,172 @@ int main(int argc, char** argv)
	return 3;
}

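+ // Per-sensor state gathered from the Mitsuba scene: handedness of the camera space, sample budget,
+ // navigation speed, and the two scene-graph cameras (a static one at the sensor pose, an interactive one for free look).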
+ struct SensorData
+ {
+ 	bool rightHandedCamera = true;
+ 	uint32_t samplesNeeded = 0u;
+ 	float moveSpeed = core::nan<float>();
+ 	scene::ICameraSceneNode* staticCamera;
+ 	scene::ICameraSceneNode* interactiveCamera;
+ };
+
auto smgr = device->getSceneManager();

- // TODO: Move into renderer?
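+ // only perspective-projection sensors (pinhole or thin lens) can be mapped onto the cameras used here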
+ auto isOkSensorType = [](const ext::MitsubaLoader::CElementSensor& sensor) -> bool {
+ 	return sensor.type == ext::MitsubaLoader::CElementSensor::Type::PERSPECTIVE || sensor.type == ext::MitsubaLoader::CElementSensor::Type::THINLENS;
+ };
+
+ auto sensorCount = globalMeta->m_global.m_sensors.size();
+
+ std::vector<SensorData> sensors = std::vector<SensorData>(sensorCount);
+
+ std::cout << "Total number of Sensors = " << sensorCount << std::endl;
+
+ if (sensorCount <= 0)
+ {
+ 	std::cout << "[ERROR] No Sensors found." << std::endl;
+ 	assert(false);
+ 	return 5; // return code?
+ }
+
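+ // set up a static and an interactive camera for every sensor declared in the scene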
+ for (uint32_t s = 0u; s < sensorCount; ++s)
+ {
+ 	std::cout << "Sensors[" << s << "] = " << std::endl;
+ 	const auto& sensor = globalMeta->m_global.m_sensors[s];
+ 	const auto& film = sensor.film;
+ 	auto& outSensorData = sensors[s];
+
+ 	if (!isOkSensorType(sensor))
+ 	{
+ 		std::cout << "\tSensor(" << s << ") Type is not valid" << std::endl;
+ 		continue;
+ 	}
+
+ 	outSensorData.samplesNeeded = sensor.sampler.sampleCount;
+ 	outSensorData.staticCamera = smgr->addCameraSceneNode(nullptr);
+ 	outSensorData.interactiveCamera = smgr->addCameraSceneNodeModifiedMaya(nullptr, -400.0f, 20.0f, 200.0f, -1, 2.0f, 1.0f, false, true);
+ 	auto& staticCamera = outSensorData.staticCamera;
+ 	auto& interactiveCamera = outSensorData.interactiveCamera;
+
+ 	// need to extract the individual components (position, target, up and handedness) from the sensor transform
+ 	{
+ 		auto relativeTransform = sensor.transform.matrix.extractSub3x4();
+ 		if (relativeTransform.getPseudoDeterminant().x < 0.f)
+ 			outSensorData.rightHandedCamera = false;
+ 		else
+ 			outSensorData.rightHandedCamera = true;
+
+ 		std::cout << "\tIsRightHanded=" << ((outSensorData.rightHandedCamera) ? "TRUE" : "FALSE") << std::endl;
+
+ 		auto pos = relativeTransform.getTranslation();
+ 		staticCamera->setPosition(pos.getAsVector3df());
+
+ 		std::cout << "\tCamera Position = <" << pos.x << "," << pos.y << "," << pos.z << ">" << std::endl;
+
+ 		auto tpose = core::transpose(sensor.transform.matrix);
+
+ 		auto up = tpose.rows[1];
+ 		core::vectorSIMDf view = tpose.rows[2];
+ 		auto target = view+pos;
+ 		staticCamera->setTarget(target.getAsVector3df());
+
+ 		std::cout << "\tCamera Target = <" << target.x << "," << target.y << "," << target.z << ">" << std::endl;
+
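+ 		// only override the up vector when the sensor's up axis disagrees noticeably with the camera's current one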
+ 		if (core::dot(core::normalize(core::cross(staticCamera->getUpVector(),view)),core::cross(up,view)).x < 0.99f)
+ 			staticCamera->setUpVector(up);
+ 	}
+
+ 	const ext::MitsubaLoader::CElementSensor::PerspectivePinhole* persp = nullptr;
+ 	switch (sensor.type)
+ 	{
+ 		case ext::MitsubaLoader::CElementSensor::Type::PERSPECTIVE:
+ 			persp = &sensor.perspective;
+ 			std::cout << "\tType = PERSPECTIVE" << std::endl;
+ 			break;
+ 		case ext::MitsubaLoader::CElementSensor::Type::THINLENS:
+ 			persp = &sensor.thinlens;
+ 			std::cout << "\tType = THINLENS" << std::endl;
+ 			break;
+ 		default:
+ 			assert(false);
+ 			break;
+ 	}
+
+
+ 	outSensorData.moveSpeed = persp->moveSpeed;
+ 	std::cout << "\tCamera Move Speed = " << outSensorData.moveSpeed << std::endl;
+
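+ 	// Mitsuba can express the FoV along the X, Y, diagonal, smaller or larger axis; convert it to the vertical (Y) FoV that the projection matrix builders expect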
+ 	float realFoVDegrees;
+ 	auto width = film.cropWidth;
+ 	auto height = film.cropHeight;
+ 	float aspectRatio = float(width) / float(height);
+ 	auto convertFromXFoV = [=](float fov) -> float
+ 	{
+ 		float aspectX = tan(core::radians(fov)*0.5f);
+ 		return core::degrees(atan(aspectX/aspectRatio)*2.f);
+ 	};
+
+ 	switch (persp->fovAxis)
+ 	{
+ 		case ext::MitsubaLoader::CElementSensor::PerspectivePinhole::FOVAxis::X:
+ 			realFoVDegrees = convertFromXFoV(persp->fov);
+ 			break;
+ 		case ext::MitsubaLoader::CElementSensor::PerspectivePinhole::FOVAxis::Y:
+ 			realFoVDegrees = persp->fov;
+ 			break;
+ 		case ext::MitsubaLoader::CElementSensor::PerspectivePinhole::FOVAxis::DIAGONAL:
+ 		{
+ 			float aspectDiag = tan(core::radians(persp->fov)*0.5f);
+ 			float aspectY = aspectDiag/core::sqrt(1.f+aspectRatio*aspectRatio);
+ 			realFoVDegrees = core::degrees(atan(aspectY)*2.f);
+ 		}
+ 			break;
+ 		case ext::MitsubaLoader::CElementSensor::PerspectivePinhole::FOVAxis::SMALLER:
+ 			if (width < height)
+ 				realFoVDegrees = convertFromXFoV(persp->fov);
+ 			else
+ 				realFoVDegrees = persp->fov;
+ 			break;
+ 		case ext::MitsubaLoader::CElementSensor::PerspectivePinhole::FOVAxis::LARGER:
+ 			if (width < height)
+ 				realFoVDegrees = persp->fov;
+ 			else
+ 				realFoVDegrees = convertFromXFoV(persp->fov);
+ 			break;
+ 		default:
+ 			realFoVDegrees = NAN;
+ 			assert(false);
+ 			break;
+ 	}
+
+ 	// TODO: apply the crop offset
+ 	assert(film.cropOffsetX==0 && film.cropOffsetY==0);
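+ 	// keep the near plane a sane fraction of the far plane and build a projection matching the sensor's handedness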
+ 	float nearClip = core::max(persp->nearClip, persp->farClip * 0.0001);
+ 	if (outSensorData.rightHandedCamera)
+ 		staticCamera->setProjectionMatrix(core::matrix4SIMD::buildProjectionMatrixPerspectiveFovRH(core::radians(realFoVDegrees), aspectRatio, nearClip, persp->farClip));
+ 	else
+ 		staticCamera->setProjectionMatrix(core::matrix4SIMD::buildProjectionMatrixPerspectiveFovLH(core::radians(realFoVDegrees), aspectRatio, nearClip, persp->farClip));
+
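+ 	// mirror the static camera's full state onto the interactive camera so navigation starts from the sensor's view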
+ 	core::vectorSIMDf cameraTarget = staticCamera->getTarget();
+ 	core::vector3df cameraTargetVec3f(cameraTarget.x, cameraTarget.y, cameraTarget.z); // have to convert because vectorSIMDf and vector3df are used inconsistently in most places of the code
+
+ 	interactiveCamera->setPosition(staticCamera->getPosition());
+ 	interactiveCamera->setTarget(cameraTargetVec3f);
+ 	interactiveCamera->setUpVector(staticCamera->getUpVector());
+ 	interactiveCamera->setLeftHanded(staticCamera->getLeftHanded());
+ 	interactiveCamera->setProjectionMatrix(staticCamera->getProjectionMatrix());
+
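+ 	// seed the ModifiedMaya animator's internal zoom and rotation from the chosen position and target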
+ 	core::vectorSIMDf cameraPos; cameraPos.set(staticCamera->getPosition());
+ 	auto modifiedMayaAnim = reinterpret_cast<scene::CSceneNodeAnimatorCameraModifiedMaya*>(interactiveCamera->getAnimators()[0]);
+ 	modifiedMayaAnim->setZoomAndRotationBasedOnTargetAndPosition(cameraPos, cameraTarget);
+ }
+
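+ // kept because the original single-sensor code path below still uses `camera`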
+ auto camera = smgr->addCameraSceneNodeModifiedMaya(nullptr, -400.0f, 20.0f, 200.0f, -1, 2.0f, 1.0f, false, true);
+
bool rightHandedCamera = true;
float moveSpeed = core::nan<float>();
uint32_t sensorSamplesNeeded = 0u;

- auto camera = smgr->addCameraSceneNodeModifiedMaya(nullptr, -400.0f, 20.0f, 200.0f, -1, 2.0f, 1.0f, false, true);
-
- auto isOkSensorType = [](const ext::MitsubaLoader::CElementSensor& sensor) -> bool {
- 	return sensor.type == ext::MitsubaLoader::CElementSensor::Type::PERSPECTIVE || sensor.type == ext::MitsubaLoader::CElementSensor::Type::THINLENS;
- };
if (globalMeta->m_global.m_sensors.size() && isOkSensorType(globalMeta->m_global.m_sensors.front()))
{
	const auto& sensor = globalMeta->m_global.m_sensors.front();
@@ -280,11 +434,6 @@ int main(int argc, char** argv)
	camera->setFarValue(5000.f);
}

- auto modifiedMayaAnim = reinterpret_cast<scene::CSceneNodeAnimatorCameraModifiedMaya*>(camera->getAnimators()[0]);
- core::vectorSIMDf cameraPos; cameraPos.set(camera->getPosition());
- core::vectorSIMDf cameraTarget; cameraTarget.set(camera->getTarget());
- modifiedMayaAnim->setZoomAndRotationBasedOnTargetAndPosition(cameraPos, cameraTarget);
-
auto driver = device->getVideoDriver();

core::smart_refctd_ptr<Renderer> renderer = core::make_smart_refctd_ptr<Renderer>(driver,device->getAssetManager(),smgr);