diff --git a/.gitignore b/.gitignore index dd2e1c7e4a..69ad3d8d94 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,10 @@ htmlcov xcuserdata/ .venv/ .cache +cmake-build-debug/ +bin/ +lib/ +pyvenv.cfg # Pycharm metadata .idea/ diff --git a/docs/tutorials/otio-serialized-schema-only-fields.md b/docs/tutorials/otio-serialized-schema-only-fields.md index b9496d9d99..d4efe3d8a9 100644 --- a/docs/tutorials/otio-serialized-schema-only-fields.md +++ b/docs/tutorials/otio-serialized-schema-only-fields.md @@ -128,6 +128,26 @@ parameters: ## Module: opentimelineio.schema +### AudioFade.1 + +parameters: +- *duration* +- *effect_name* +- *enabled* +- *fade_in* +- *metadata* +- *name* +- *start_time* + +### AudioVolume.1 + +parameters: +- *effect_name* +- *enabled* +- *gain* +- *metadata* +- *name* + ### Clip.2 parameters: @@ -252,9 +272,10 @@ parameters: - *metadata* - *name* -### Timeline.1 +### Timeline.2 parameters: +- *canvas_size* - *global_start_time* - *metadata* - *name* @@ -280,6 +301,121 @@ parameters: - *out_offset* - *transition_type* +### VideoBrightness.1 + +parameters: +- *brightness* +- *effect_name* +- *enabled* +- *metadata* +- *name* + +### VideoColorTemperature.1 + +parameters: +- *effect_name* +- *enabled* +- *metadata* +- *name* +- *temperature* + +### VideoContrast.1 + +parameters: +- *contrast* +- *effect_name* +- *enabled* +- *metadata* +- *name* + +### VideoCrop.1 + +parameters: +- *bottom* +- *effect_name* +- *enabled* +- *left* +- *metadata* +- *name* +- *right* +- *top* + +### VideoFlip.1 + +parameters: +- *effect_name* +- *enabled* +- *flip_horizontally* +- *flip_vertically* +- *metadata* +- *name* + +### VideoLightness.1 + +parameters: +- *effect_name* +- *enabled* +- *lightness* +- *metadata* +- *name* + +### VideoMask.1 + +parameters: +- *effect_name* +- *enabled* +- *mask_type* +- *mask_url* +- *metadata* +- *name* + +### VideoPosition.1 + +parameters: +- *effect_name* +- *enabled* +- *metadata* +- *name* +- *x* +- *y* + +### VideoRotate.1 
+ +parameters: +- *angle* +- *effect_name* +- *enabled* +- *metadata* +- *name* + +### VideoRoundedCorners.1 + +parameters: +- *effect_name* +- *enabled* +- *metadata* +- *name* +- *radius* + +### VideoSaturation.1 + +parameters: +- *effect_name* +- *enabled* +- *metadata* +- *name* +- *saturation* + +### VideoScale.1 + +parameters: +- *effect_name* +- *enabled* +- *height* +- *metadata* +- *name* +- *width* + ### SchemaDef.1 parameters: diff --git a/docs/tutorials/otio-serialized-schema.md b/docs/tutorials/otio-serialized-schema.md index 90706b3633..8bb7392ac8 100644 --- a/docs/tutorials/otio-serialized-schema.md +++ b/docs/tutorials/otio-serialized-schema.md @@ -274,6 +274,48 @@ parameters: ## Module: opentimelineio.schema +### AudioFade.1 + +*full module path*: `opentimelineio.schema.AudioFade` + +*documentation*: + +``` + +An effect that defines an audio fade. +If fade_in is true, audio is fading in from the start time for the duration +If fade_in is false, the audio is fading out from the start time for the duration + +``` + +parameters: +- *duration*: Fade duration +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *fade_in*: Fade direction +- *metadata*: +- *name*: +- *start_time*: Fade start time + +### AudioVolume.1 + +*full module path*: `opentimelineio.schema.AudioVolume` + +*documentation*: + +``` + +An effect that multiplies the audio volume by a given gain value + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. 
+- *gain*: Gain multiplier +- *metadata*: +- *name*: + ### Clip.2 *full module path*: `opentimelineio.schema.Clip` @@ -614,7 +656,7 @@ parameters: - *metadata*: - *name*: -### Timeline.1 +### Timeline.2 *full module path*: `opentimelineio.schema.Timeline` @@ -625,6 +667,7 @@ None ``` parameters: +- *canvas_size*: - *global_start_time*: - *metadata*: - *name*: @@ -667,6 +710,243 @@ parameters: - *out_offset*: Amount of the next clip this transition overlaps, exclusive. - *transition_type*: Kind of transition, as defined by the :class:`Type` enum. +### VideoBrightness.1 + +*full module path*: `opentimelineio.schema.VideoBrightness` + +*documentation*: + +``` + +An effect that adjusts video brightness. + +``` + +parameters: +- *brightness*: Brightness value +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: + +### VideoColorTemperature.1 + +*full module path*: `opentimelineio.schema.VideoColorTemperature` + +*documentation*: + +``` + +An effect that adjusts video color temperature. + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: +- *temperature*: Color temperature value + +### VideoContrast.1 + +*full module path*: `opentimelineio.schema.VideoContrast` + +*documentation*: + +``` + +An effect that adjusts video contrast. + +``` + +parameters: +- *contrast*: Contrast value +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: + +### VideoCrop.1 + +*full module path*: `opentimelineio.schema.VideoCrop` + +*documentation*: + +``` + +An effect that crops video by a given amount of pixels on each side. + +``` + +parameters: +- *bottom*: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. 
+- *left*: +- *metadata*: +- *name*: +- *right*: +- *top*: + +### VideoFlip.1 + +*full module path*: `opentimelineio.schema.VideoFlip` + +*documentation*: + +``` + +An effect that flips video horizontally or vertically. + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *flip_horizontally*: +- *flip_vertically*: +- *metadata*: +- *name*: + +### VideoLightness.1 + +*full module path*: `opentimelineio.schema.VideoLightness` + +*documentation*: + +``` + +An effect that adjusts video lightness. + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *lightness*: Lightness value +- *metadata*: +- *name*: + +### VideoMask.1 + +*full module path*: `opentimelineio.schema.VideoMask` + +*documentation*: + +``` + +An effect that applies a mask to a video + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *mask_type*: +- *mask_url*: +- *metadata*: +- *name*: + +### VideoPosition.1 + +*full module path*: `opentimelineio.schema.VideoPosition` + +*documentation*: + +``` + +An effect that positions video by a given offset in the frame. +The position is the location of the top left of the image on the canvas + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: +- *x*: +- *y*: + +### VideoRotate.1 + +*full module path*: `opentimelineio.schema.VideoRotate` + +*documentation*: + +``` + +An effect that rotates video by a given amount. +The rotation is specified in degrees clockwise. + +``` + +parameters: +- *angle*: Rotation angle in degrees clockwise +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. 
+- *metadata*: +- *name*: + +### VideoRoundedCorners.1 + +*full module path*: `opentimelineio.schema.VideoRoundedCorners` + +*documentation*: + +``` + +An effect that rounds the corners of a video + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: +- *radius*: Radius of the corners + +### VideoSaturation.1 + +*full module path*: `opentimelineio.schema.VideoSaturation` + +*documentation*: + +``` + +An effect that adjusts video saturation. + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *metadata*: +- *name*: +- *saturation*: Saturation value + +### VideoScale.1 + +*full module path*: `opentimelineio.schema.VideoScale` + +*documentation*: + +``` + +An effect that scales video to the given dimensions. + +``` + +parameters: +- *effect_name*: +- *enabled*: If true, the Effect is applied. If false, the Effect is omitted. +- *height*: Height to scale to +- *metadata*: +- *name*: +- *width*: Width to scale to + ### SchemaDef.1 *full module path*: `opentimelineio.schema.SchemaDef` diff --git a/src/.gitignore b/src/.gitignore new file mode 100644 index 0000000000..15e412d188 --- /dev/null +++ b/src/.gitignore @@ -0,0 +1 @@ +ts-opentimelineio/ diff --git a/src/opentimelineio/CMakeLists.txt b/src/opentimelineio/CMakeLists.txt index cf5190c57a..3371ba5194 100644 --- a/src/opentimelineio/CMakeLists.txt +++ b/src/opentimelineio/CMakeLists.txt @@ -5,6 +5,7 @@ set(OPENTIMELINEIO_HEADER_FILES anyDictionary.h anyVector.h clip.h + colorManagementEffects.h composable.h composition.h deserialization.h @@ -32,14 +33,17 @@ set(OPENTIMELINEIO_HEADER_FILES timeline.h track.h trackAlgorithm.h + transformEffects.h transition.h typeRegistry.h unknownSchema.h vectorIndexing.h - version.h) + version.h + volumeEffects.h) add_library(opentimelineio ${OTIO_SHARED_OR_STATIC_LIB} clip.cpp + colorManagementEffects.cpp + composable.cpp 
composition.cpp deserialization.cpp @@ -69,9 +73,11 @@ add_library(opentimelineio ${OTIO_SHARED_OR_STATIC_LIB} timeline.cpp track.cpp trackAlgorithm.cpp + transformEffects.cpp transition.cpp typeRegistry.cpp - unknownSchema.cpp + unknownSchema.cpp + volumeEffects.cpp CORE_VERSION_MAP.cpp ${OPENTIMELINEIO_HEADER_FILES}) diff --git a/src/opentimelineio/CORE_VERSION_MAP.cpp b/src/opentimelineio/CORE_VERSION_MAP.cpp index 1043902253..2a5d5d92cd 100644 --- a/src/opentimelineio/CORE_VERSION_MAP.cpp +++ b/src/opentimelineio/CORE_VERSION_MAP.cpp @@ -145,6 +145,8 @@ const label_to_schema_version_map CORE_VERSION_MAP{ { "0.18.0.dev1", { { "Adapter", 1 }, + { "AudioFade", 1 }, + { "AudioVolume", 1 }, { "Clip", 2 }, { "Composable", 1 }, { "Composition", 1 }, @@ -169,10 +171,22 @@ const label_to_schema_version_map CORE_VERSION_MAP{ { "Stack", 1 }, { "Test", 1 }, { "TimeEffect", 1 }, - { "Timeline", 1 }, + { "Timeline", 2 }, { "Track", 1 }, { "Transition", 1 }, { "UnknownSchema", 1 }, + { "VideoBrightness", 1 }, + { "VideoColorTemperature", 1 }, + { "VideoContrast", 1 }, + { "VideoCrop", 1 }, + { "VideoFlip", 1 }, + { "VideoLightness", 1 }, + { "VideoMask", 1 }, + { "VideoPosition", 1 }, + { "VideoRotate", 1 }, + { "VideoRoundedCorners", 1 }, + { "VideoSaturation", 1 }, + { "VideoScale", 1 }, } }, // {next} }; diff --git a/src/opentimelineio/colorManagementEffects.cpp b/src/opentimelineio/colorManagementEffects.cpp new file mode 100644 index 0000000000..642894208b --- /dev/null +++ b/src/opentimelineio/colorManagementEffects.cpp @@ -0,0 +1,60 @@ +#include "opentimelineio/colorManagementEffects.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { + +bool VideoBrightness::read_from(Reader &reader) +{ + return reader.read("brightness", &_brightness) + && Parent::read_from(reader); +} + +void VideoBrightness::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("brightness", _brightness); +} + +bool VideoContrast::read_from(Reader &reader) +{ + 
return reader.read("contrast", &_contrast) + && Parent::read_from(reader); +} + +void VideoContrast::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("contrast", _contrast); +} + +bool VideoSaturation::read_from(Reader &reader) +{ + return reader.read("saturation", &_saturation) + && Parent::read_from(reader); +} + +void VideoSaturation::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("saturation", _saturation); +} + +bool VideoLightness::read_from(Reader &reader) +{ + return reader.read("lightness", &_lightness) + && Parent::read_from(reader); +} + +void VideoLightness::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("lightness", _lightness); +} + +bool VideoColorTemperature::read_from(Reader &reader) +{ + return reader.read("temperature", &_temperature) + && Parent::read_from(reader); +} + +void VideoColorTemperature::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("temperature", _temperature); +} + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/colorManagementEffects.h b/src/opentimelineio/colorManagementEffects.h new file mode 100644 index 0000000000..2586c034e1 --- /dev/null +++ b/src/opentimelineio/colorManagementEffects.h @@ -0,0 +1,158 @@ +#pragma once + +#include "opentimelineio/effect.h" +#include "opentimelineio/version.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { + +/// @brief A brightness effect +class VideoBrightness : public Effect +{ +public: + struct Schema { + static auto constexpr name = "VideoBrightness"; + static int constexpr version = 1; + }; + using Parent = Effect; + + VideoBrightness( + std::string const& name = std::string(), + double brightness = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _brightness(brightness) + {} + + double brightness() const noexcept { return _brightness; } 
+ void set_brightness(double brightness) noexcept { _brightness = brightness; } + +protected: + virtual ~VideoBrightness() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _brightness; +}; + +/// @brief A contrast effect +class VideoContrast : public Effect +{ +public: + struct Schema { + static auto constexpr name = "VideoContrast"; + static int constexpr version = 1; + }; + using Parent = Effect; + + VideoContrast( + std::string const& name = std::string(), + double contrast = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _contrast(contrast) + {} + + double contrast() const noexcept { return _contrast; } + void set_contrast(double contrast) noexcept { _contrast = contrast; } + +protected: + virtual ~VideoContrast() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _contrast; +}; + +/// @brief A saturation effect +class VideoSaturation : public Effect +{ +public: + struct Schema { + static auto constexpr name = "VideoSaturation"; + static int constexpr version = 1; + }; + using Parent = Effect; + + VideoSaturation( + std::string const& name = std::string(), + double saturation = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _saturation(saturation) + {} + + double saturation() const noexcept { return _saturation; } + void set_saturation(double saturation) noexcept { _saturation = saturation; } + +protected: + virtual ~VideoSaturation() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _saturation; +}; + +/// @brief A lightness effect +class VideoLightness : public Effect +{ +public: + struct Schema { + static auto constexpr name = "VideoLightness"; + static int constexpr version = 1; + }; + using Parent = Effect; + + VideoLightness( + std::string 
const& name = std::string(), + double lightness = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _lightness(lightness) + {} + + double lightness() const noexcept { return _lightness; } + void set_lightness(double lightness) noexcept { _lightness = lightness; } + +protected: + virtual ~VideoLightness() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _lightness; +}; + +/// @brief A color temperature effect +class VideoColorTemperature : public Effect +{ +public: + struct Schema { + static auto constexpr name = "VideoColorTemperature"; + static int constexpr version = 1; + }; + using Parent = Effect; + + VideoColorTemperature( + std::string const& name = std::string(), + double temperature = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _temperature(temperature) + {} + + double temperature() const noexcept { return _temperature; } + void set_temperature(double temperature) noexcept { _temperature = temperature; } + +protected: + virtual ~VideoColorTemperature() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _temperature; +}; + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/composition.cpp b/src/opentimelineio/composition.cpp index 57dfc4c0b1..c8f4ed4e63 100644 --- a/src/opentimelineio/composition.cpp +++ b/src/opentimelineio/composition.cpp @@ -98,6 +98,7 @@ Composition::insert_child( } _child_set.insert(child); + invalidate_cache(); return true; } @@ -130,6 +131,7 @@ Composition::set_child(int index, Composable* child, ErrorStatus* error_status) child->_set_parent(this); _children[index] = child; _child_set.insert(child); + invalidate_cache(); } return true; } @@ -161,6 +163,7 @@ Composition::remove_child(int index, ErrorStatus* error_status) 
_children[index]->_set_parent(nullptr); _children.erase(_children.begin() + index); } + invalidate_cache(); return true; } @@ -337,7 +340,7 @@ Composition::range_of_child(Composable const* child, ErrorStatus* error_status) result_range = TimeRange( result_range->start_time() + parent_range.start_time(), - result_range->duration()); + std::min(result_range->duration(), parent_range.duration())); current = parent; } @@ -386,9 +389,26 @@ Composition::trimmed_range_of_child( continue; } - result_range = TimeRange( - result_range->start_time() + parent_range.start_time(), - result_range->duration()); + auto untrimmed_range = + parent->range_of_child_at_index(index, error_status); + if (is_error(error_status)) + { + return TimeRange(); + } + + auto current_trimmed_range = + static_cast(current)->trimmed_range(); + auto untrimmed_start_time = + result_range->start_time() - current_trimmed_range.start_time() + untrimmed_range.start_time(); + + auto start_time = + std::max(untrimmed_start_time, parent_range.start_time()); + result_range = TimeRange::range_from_start_end_time( + start_time, + std::min( + untrimmed_start_time + result_range->duration(), + parent_range.end_time_exclusive())); + current = parent; } if (!source_range()) diff --git a/src/opentimelineio/composition.h b/src/opentimelineio/composition.h index b65936ab1b..a0f5a66394 100644 --- a/src/opentimelineio/composition.h +++ b/src/opentimelineio/composition.h @@ -158,6 +158,9 @@ class Composition : public Item std::optional search_range = std::nullopt, bool shallow_search = false) const; + + virtual void invalidate_cache() const {}; + protected: virtual ~Composition(); diff --git a/src/opentimelineio/deserialization.cpp b/src/opentimelineio/deserialization.cpp index 9090e23ade..a4fb24fddd 100644 --- a/src/opentimelineio/deserialization.cpp +++ b/src/opentimelineio/deserialization.cpp @@ -800,6 +800,14 @@ SerializableObject::Reader::read( return _read_optional(key, value); } +bool 
+SerializableObject::Reader::read( + std::string const& key, + std::optional* value) +{ + return _read_optional(key, value); +} + bool SerializableObject::Reader::read( std::string const& key, @@ -824,6 +832,14 @@ SerializableObject::Reader::read( return _read_optional(key, value); } +bool +SerializableObject::Reader::read( + std::string const& key, + std::optional* value) +{ + return _read_optional(key, value); +} + bool SerializableObject::Reader::read( std::string const& key, diff --git a/src/opentimelineio/serializableObject.h b/src/opentimelineio/serializableObject.h index 63b096a038..8daae71a81 100644 --- a/src/opentimelineio/serializableObject.h +++ b/src/opentimelineio/serializableObject.h @@ -134,9 +134,12 @@ class SerializableObject bool read(std::string const& key, std::optional* dest); bool read(std::string const& key, std::optional* dest); bool read(std::string const& key, std::optional* dest); + bool read(std::string const& key, std::optional* dest); bool read(std::string const& key, std::optional* dest); bool read(std::string const& key, std::optional* dest); bool read(std::string const& key, std::optional* dest); + bool read(std::string const& key, + std::optional* value); bool read( std::string const& key, std::optional* value); @@ -451,6 +454,9 @@ class SerializableObject void write(std::string const& key, IMATH_NAMESPACE::Box2d value); void write(std::string const& key, std::optional value); void write(std::string const& key, std::optional value); + void write( + std::string const& key, + std::optional value); void write( std::string const& key, std::optional value); diff --git a/src/opentimelineio/serialization.cpp b/src/opentimelineio/serialization.cpp index 0b9b2de2ba..71918672be 100644 --- a/src/opentimelineio/serialization.cpp +++ b/src/opentimelineio/serialization.cpp @@ -924,6 +924,15 @@ SerializableObject::Writer::write( value ? 
_encoder.write_value(*value) : _encoder.write_null_value(); } +void +SerializableObject::Writer::write( + std::string const& key, + std::optional value) +{ + _encoder_write_key(key); + value ? _encoder.write_value(*value) : _encoder.write_null_value(); +} + void SerializableObject::Writer::write(std::string const& key, TimeTransform value) { diff --git a/src/opentimelineio/stack.cpp b/src/opentimelineio/stack.cpp index 070d8944e1..26e86782f0 100644 --- a/src/opentimelineio/stack.cpp +++ b/src/opentimelineio/stack.cpp @@ -42,12 +42,18 @@ TimeRange Stack::range_of_child_at_index(int index, ErrorStatus* error_status) const { index = adjusted_vector_index(index, children()); + auto it = _childRangesCacche.find(index); + if (it != _childRangesCacche.end()) { + return it->second; + } + if (index < 0 || index >= int(children().size())) { if (error_status) { *error_status = ErrorStatus::ILLEGAL_INDEX; } + _childRangesCacche[index] = TimeRange(); return TimeRange(); } @@ -55,10 +61,13 @@ Stack::range_of_child_at_index(int index, ErrorStatus* error_status) const auto duration = child->duration(error_status); if (is_error(error_status)) { + _childRangesCacche[index] = TimeRange(); return TimeRange(); } - return TimeRange(RationalTime(0, duration.rate()), duration); + auto result = TimeRange(RationalTime(0, duration.rate()), duration); + _childRangesCacche[index] = result; + return result; } std::map @@ -118,9 +127,14 @@ Stack::trimmed_range_of_child_at_index(int index, ErrorStatus* error_status) TimeRange Stack::available_range(ErrorStatus* error_status) const { + if (_availableRangeCache.has_value()) { + return _availableRangeCache.value(); + } + if (children().empty()) { - return TimeRange(); + _availableRangeCache = TimeRange(); + return _availableRangeCache.value(); } auto duration = children()[0].value->duration(error_status); @@ -130,7 +144,8 @@ Stack::available_range(ErrorStatus* error_status) const std::max(duration, children()[i].value->duration(error_status)); } - 
return TimeRange(RationalTime(0, duration.rate()), duration); + _availableRangeCache = TimeRange(RationalTime(0, duration.rate()), duration); + return _availableRangeCache.value(); } std::vector> @@ -174,4 +189,10 @@ Stack::available_image_bounds(ErrorStatus* error_status) const return box; } +void +Stack::invalidate_cache() const +{ + _availableRangeCache = std::nullopt; + _childRangesCacche.clear(); +} }} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/stack.h b/src/opentimelineio/stack.h index 3894d91ad4..6c44368807 100644 --- a/src/opentimelineio/stack.h +++ b/src/opentimelineio/stack.h @@ -69,6 +69,9 @@ class Stack : public Composition std::optional const& search_range = std::nullopt, bool shallow_search = false) const; + + + void invalidate_cache() const override; protected: virtual ~Stack(); @@ -76,6 +79,9 @@ class Stack : public Composition bool read_from(Reader&) override; void write_to(Writer&) const override; + + mutable std::unordered_map _childRangesCacche; + mutable std::optional _availableRangeCache; }; }} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/timeline.cpp b/src/opentimelineio/timeline.cpp index d2f38571e8..30e4c831d3 100644 --- a/src/opentimelineio/timeline.cpp +++ b/src/opentimelineio/timeline.cpp @@ -7,11 +7,13 @@ namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { Timeline::Timeline( - std::string const& name, - std::optional global_start_time, - AnyDictionary const& metadata) + std::string const& name, + std::optional global_start_time, + std::optional canvas_size, + AnyDictionary const& metadata) : SerializableObjectWithMetadata(name, metadata) , _global_start_time(global_start_time) + , _canvas_size(canvas_size) , _tracks(new Stack("tracks")) {} @@ -29,6 +31,7 @@ Timeline::read_from(Reader& reader) { return reader.read("tracks", &_tracks) && reader.read_if_present("global_start_time", &_global_start_time) + && reader.read_if_present("canvas_size", 
&_canvas_size) && Parent::read_from(reader); } @@ -37,6 +40,7 @@ Timeline::write_to(Writer& writer) const { Parent::write_to(writer); writer.write("global_start_time", _global_start_time); + writer.write("canvas_size", _canvas_size); writer.write("tracks", _tracks); } @@ -86,4 +90,27 @@ Timeline::find_clips( shallow_search); } +void +Timeline::invalidate_cache() const +{ + std::stack stack; + stack.push(_tracks.value); + while (!stack.empty()) + { + auto composition = stack.top(); + composition->invalidate_cache(); + + stack.pop(); + + for (auto child : composition->children()) + { + auto* next_composition = dynamic_cast(child.value); + if (next_composition) + { + stack.push(next_composition); + } + } + } +} + }} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/timeline.h b/src/opentimelineio/timeline.h index d18c629d83..6928cc2ac2 100644 --- a/src/opentimelineio/timeline.h +++ b/src/opentimelineio/timeline.h @@ -20,7 +20,7 @@ class Timeline : public SerializableObjectWithMetadata struct Schema { static auto constexpr name = "Timeline"; - static int constexpr version = 1; + static int constexpr version = 2; }; using Parent = SerializableObjectWithMetadata; @@ -29,11 +29,13 @@ class Timeline : public SerializableObjectWithMetadata /// /// @param name The timeline name. /// @param global_start_time The global start time of the timeline. + /// @param canvas_size The dimensions of the target canvas /// @param metadata The metadata for the timeline. Timeline( - std::string const& name = std::string(), - std::optional global_start_time = std::nullopt, - AnyDictionary const& metadata = AnyDictionary()); + std::string const& name = std::string(), + std::optional global_start_time = std::nullopt, + std::optional canvas_size = std::nullopt, + AnyDictionary const& metadata = AnyDictionary()); /// @brief Return the timeline stack. 
Stack* tracks() const noexcept { return _tracks; } @@ -59,6 +61,19 @@ class Timeline : public SerializableObjectWithMetadata _global_start_time = global_start_time; } + /// @brief Return the canvas size + std::optional canvas_size() const noexcept + { + return _canvas_size; + } + + /// @brief Set the canvas size + void + set_canvas_size(std::optional const& canvas_size) + { + _canvas_size = canvas_size; + } + /// @brief Return the duration of the timeline. RationalTime duration(ErrorStatus* error_status = nullptr) const { @@ -109,6 +124,9 @@ class Timeline : public SerializableObjectWithMetadata return _tracks.value->available_image_bounds(error_status); } + /// @brief Invalidate the cache. + void invalidate_cache() const; + protected: virtual ~Timeline(); @@ -116,8 +134,9 @@ class Timeline : public SerializableObjectWithMetadata void write_to(Writer&) const override; private: - std::optional _global_start_time; - Retainer _tracks; + std::optional _global_start_time; + std::optional _canvas_size; + Retainer _tracks; }; template diff --git a/src/opentimelineio/track.cpp b/src/opentimelineio/track.cpp index 3f81f27148..26e1abb39e 100644 --- a/src/opentimelineio/track.cpp +++ b/src/opentimelineio/track.cpp @@ -45,12 +45,18 @@ TimeRange Track::range_of_child_at_index(int index, ErrorStatus* error_status) const { index = adjusted_vector_index(index, children()); + auto it = _childRangesCacche.find(index); + if (it != _childRangesCacche.end()) { + return it->second; + } + if (index < 0 || index >= int(children().size())) { if (error_status) { *error_status = ErrorStatus::ILLEGAL_INDEX; } + _childRangesCacche[index] = TimeRange(); return TimeRange(); } @@ -58,6 +64,7 @@ Track::range_of_child_at_index(int index, ErrorStatus* error_status) const RationalTime child_duration = child->duration(error_status); if (is_error(error_status)) { + _childRangesCacche[index] = TimeRange(); return TimeRange(); } @@ -72,6 +79,7 @@ Track::range_of_child_at_index(int index, ErrorStatus* 
error_status) const } if (is_error(error_status)) { + _childRangesCacche[index] = TimeRange(); return TimeRange(); } } @@ -81,7 +89,9 @@ Track::range_of_child_at_index(int index, ErrorStatus* error_status) const start_time -= transition->in_offset(); } - return TimeRange(start_time, child_duration); + auto result = TimeRange(start_time, child_duration); + _childRangesCacche[index] = result; + return result; } TimeRange @@ -110,6 +120,10 @@ Track::trimmed_range_of_child_at_index(int index, ErrorStatus* error_status) TimeRange Track::available_range(ErrorStatus* error_status) const { + if (_availableRangeCache.has_value()) { + return _availableRangeCache.value(); + } + RationalTime duration; for (const auto& child: children()) { @@ -118,7 +132,8 @@ Track::available_range(ErrorStatus* error_status) const duration += item->duration(error_status); if (is_error(error_status)) { - return TimeRange(); + _availableRangeCache = TimeRange(); + return _availableRangeCache.value(); } } } @@ -137,7 +152,8 @@ Track::available_range(ErrorStatus* error_status) const } } - return TimeRange(RationalTime(0, duration.rate()), duration); + _availableRangeCache = TimeRange(RationalTime(0, duration.rate()), duration); + return _availableRangeCache.value(); } std::pair, std::optional> @@ -305,4 +321,11 @@ Track::available_image_bounds(ErrorStatus* error_status) const return box; } +void +Track::invalidate_cache() const +{ + _availableRangeCache = std::nullopt; + _childRangesCacche.clear(); +} + }} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/track.h b/src/opentimelineio/track.h index 177a982a8d..18f0ae47f7 100644 --- a/src/opentimelineio/track.h +++ b/src/opentimelineio/track.h @@ -92,6 +92,8 @@ class Track : public Composition std::optional const& search_range = std::nullopt, bool shallow_search = false) const; + void invalidate_cache() const override; + protected: virtual ~Track(); @@ -102,6 +104,8 @@ class Track : public Composition private: 
std::string _kind; + mutable std::unordered_map _childRangesCacche; + mutable std::optional _availableRangeCache; }; }} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/transformEffects.cpp b/src/opentimelineio/transformEffects.cpp new file mode 100644 index 0000000000..d5b0b85ffa --- /dev/null +++ b/src/opentimelineio/transformEffects.cpp @@ -0,0 +1,118 @@ +#include "opentimelineio/transformEffects.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { +bool VideoScale::read_from(Reader &reader) +{ + return reader.read("width", &_width) + && reader.read("height", &_height) + && Parent::read_from(reader); +} + +void VideoScale::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("width", _width); + writer.write("height", _height); +} + +bool VideoCrop::read_from(Reader &reader) +{ + return reader.read("left", &_left) + && reader.read("right", &_right) + && reader.read("top", &_top) + && reader.read("bottom", &_bottom) + && Parent::read_from(reader); +} + +void VideoCrop::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("left", _left); + writer.write("right", _right); + writer.write("top", _top); + writer.write("bottom", _bottom); +} + +bool VideoPosition::read_from(Reader &reader) +{ + return reader.read("x", &_x) + && reader.read("y", &_y) + && Parent::read_from(reader); +} + +void VideoPosition::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("x", _x); + writer.write("y", _y); +} + +bool VideoRotate::read_from(Reader &reader) +{ + return reader.read("angle", &_angle) + && Parent::read_from(reader); +} + +void VideoRotate::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("angle", _angle); +} + +bool VideoRoundedCorners::read_from(Reader &reader) +{ + return reader.read("radius", &_radius) + && Parent::read_from(reader); +} + +void VideoRoundedCorners::write_to(Writer &writer) const { + Parent::write_to(writer); + 
writer.write("radius", _radius); +} + +bool VideoFlip::read_from(Reader &reader) +{ + return reader.read("flip_horizontally", &_flip_horizontally) + && reader.read("flip_vertically", &_flip_vertically) + && Parent::read_from(reader); +} + +void VideoFlip::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("flip_horizontally", _flip_horizontally); + writer.write("flip_vertically", _flip_vertically); +} + +bool VideoMask::read_from(Reader &reader) +{ + bool result = reader.read("mask_type", &_mask_type) + && reader.read("mask_url", &_mask_url) + && reader.read_if_present("mask_replacement_url", &_mask_replacement_url) + && reader.read_if_present("blur_radius", &_blur_radius) + && Parent::read_from(reader); + + if (result) { + // Check optionals are present for the mask type + if (_mask_type == MaskType::replace) { + if (!_mask_replacement_url) { + return false; + } + } else if (_mask_type == MaskType::blur) { + if (!_blur_radius) { + return false; + } + } + } + + return result; +} + +void VideoMask::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("mask_type", _mask_type); + writer.write("mask_url", _mask_url); + if (_mask_replacement_url) { + writer.write("mask_replacement_url", _mask_replacement_url.value()); + } + if (_blur_radius) { + writer.write("blur_radius", _blur_radius.value()); + } +} + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/transformEffects.h b/src/opentimelineio/transformEffects.h new file mode 100644 index 0000000000..86b78bc546 --- /dev/null +++ b/src/opentimelineio/transformEffects.h @@ -0,0 +1,331 @@ +#pragma once + +#include "opentimelineio/effect.h" +#include "opentimelineio/version.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { + +/// @brief An scaling effect +class VideoScale : public Effect +{ +public: + /// @brief This struct provides the Effect schema. 
+ struct Schema + { + static auto constexpr name = "VideoScale"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new scaling effect. + /// + /// @param name The name of the effect object. + /// @param width How much to scale the width by. + /// @param height How much to scale the height by. + /// @param metadata The metadata for the effect. + /// @note This constructor takes no enabled parameter; the effect defaults to enabled. + VideoScale( + std::string const& name = std::string(), + int64_t width = 0, + int64_t height = 0, + AnyDictionary const& metadata = AnyDictionary()) + : Effect(name, Schema::name, metadata) + , _width(width) + , _height(height) + {} + + int64_t width() const noexcept { return _width; } + int64_t height() const noexcept { return _height; } + + void set_width(int64_t width) noexcept { _width = width; } + void set_height(int64_t height) noexcept { _height = height; } + +protected: + + virtual ~VideoScale() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + int64_t _width; ///< The scaled width + int64_t _height; ///< The scaled height +}; + +/// @brief A crop effect +class VideoCrop : public Effect +{ +public: + /// @brief This struct provides the Effect schema. + struct Schema + { + static auto constexpr name = "VideoCrop"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new crop effect. + /// + /// @param name The name of the effect object. + /// @param left The amount to crop from the left. + /// @param right The amount to crop from the right. + /// @param top The amount to crop from the top. + /// @param bottom The amount to crop from the bottom. + /// @param metadata The metadata for the effect. + /// @param enabled Whether the effect is enabled. 
+ VideoCrop( + std::string const& name = std::string(), + int64_t left = 0, + int64_t right = 0, + int64_t top = 0, + int64_t bottom = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _left(left) + , _right(right) + , _top(top) + , _bottom(bottom) + {} + + int64_t left() const noexcept { return _left; } + int64_t right() const noexcept { return _right; } + int64_t top() const noexcept { return _top; } + int64_t bottom() const noexcept { return _bottom; } + + void set_left(int64_t left) noexcept { _left = left; } + void set_right(int64_t right) noexcept { _right = right; } + void set_top(int64_t top) noexcept { _top = top; } + void set_bottom(int64_t bottom) noexcept { _bottom = bottom; } + +protected: + virtual ~VideoCrop() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + int64_t _left; ///< The amount to crop from the left. + int64_t _right; ///< The amount to crop from the right. + int64_t _top; ///< The amount to crop from the top. + int64_t _bottom; ///< The amount to crop from the bottom. +}; + +/// @brief An position effect +class VideoPosition : public Effect +{ +public: + /// @brief This struct provides the Effect schema. + struct Schema + { + static auto constexpr name = "VideoPosition"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new position effect. + /// + /// @param name The name of the effect object. + /// @param x Distance of top left corner from left edge of canvas + /// @param y Distance of top left corner from top edge of canvas + /// @param metadata The metadata for the effect. + /// @param enabled Whether the effect is enabled. 
+ VideoPosition( + std::string const& name = std::string(), + int64_t x = 0, + int64_t y = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _x(x) + , _y(y) + {} + + int64_t x() const noexcept { return _x; } + int64_t y() const noexcept { return _y; } + + void set_x(int64_t x) noexcept { _x = x; } + void set_y(int64_t y) noexcept { _y = y; } + +protected: + virtual ~VideoPosition() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + int64_t _x; ///< The horizontal position. + int64_t _y; ///< The vertical position. +}; + +/// @brief An rotation effect +class VideoRotate : public Effect +{ +public: + /// @brief This struct provides the Effect schema. + struct Schema + { + static auto constexpr name = "VideoRotate"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new rotation effect. + /// + /// @param name The name of the effect object. + /// @param angle The amount of rotation, degrees clockwise + /// @param metadata The metadata for the effect. + /// @param enabled Whether the effect is enabled. + VideoRotate( + std::string const& name = std::string(), + double angle = 0.0, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _angle(angle) + {} + + double angle() const noexcept { return _angle; } + void set_angle(double angle) noexcept { _angle = angle; } + +protected: + virtual ~VideoRotate() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _angle; ///< The angle of rotation, degrees clockwise +}; + +/// @brief A rounded corner effect +class VideoRoundedCorners : public Effect +{ +public: + /// @brief This struct provides the Effect schema. 
+ struct Schema + { + static auto constexpr name = "VideoRoundedCorners"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new rounded corner effect. + /// + /// @param name The name of the effect object. + /// @param radius The corner radius. + /// @param metadata The metadata for the effect. + /// @param enabled Whether the effect is enabled. + VideoRoundedCorners( + std::string const& name = std::string(), + int64_t const radius = 0, + AnyDictionary const& metadata = AnyDictionary(), + bool const enabled = true) + : Effect(name, Schema::name, metadata, enabled), + _radius(radius) + {} + + int64_t radius() const noexcept { return _radius; } + void set_radius(int64_t radius) noexcept { _radius = radius; } + +protected: + ~VideoRoundedCorners() override = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + int64_t _radius; +}; + +/// @brief A flip effect +class VideoFlip : public Effect +{ +public: + struct Schema + { + static auto constexpr name = "VideoFlip"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new flip effect. + /// + /// @param name The name of the effect object. + /// @param flip_horizontally Whether to flip horizontally. + /// @param flip_vertically Whether to flip vertically. + /// @param metadata The metadata for the effect. + /// @param enabled Whether the effect is enabled. 
+ VideoFlip( + std::string const& name = std::string(), + bool flip_horizontally = false, + bool flip_vertically = false, + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _flip_horizontally(flip_horizontally) + , _flip_vertically(flip_vertically) + {} + + bool flip_horizontally() const noexcept { return _flip_horizontally; } + void set_flip_horizontally(bool flip_horizontally) noexcept { _flip_horizontally = flip_horizontally; } + + bool flip_vertically() const noexcept { return _flip_vertically; } + void set_flip_vertically(bool flip_vertically) noexcept { _flip_vertically = flip_vertically; } + +protected: + virtual ~VideoFlip() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + bool _flip_horizontally; ///< Whether to flip horizontally + bool _flip_vertically; ///< Whether to flip vertically +}; + +class VideoMask : public Effect +{ +public: + + struct MaskType + { + static auto constexpr remove = "REMOVE"; + static auto constexpr replace = "REPLACE"; + static auto constexpr blur = "BLUR"; + }; + + struct Schema + { + static auto constexpr name = "VideoMask"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + VideoMask( + std::string const& name = std::string(), + std::string const& mask_type = MaskType::remove, + std::string const& mask_url = std::string(), + AnyDictionary const& metadata = AnyDictionary(), + bool enabled = true) + : Effect(name, Schema::name, metadata, enabled) + , _mask_type(mask_type) + , _mask_url(mask_url) + {} + + std::string mask_type() const noexcept { return _mask_type; } + void set_mask_type(std::string mask_type) noexcept { _mask_type = mask_type; } + std::string mask_url() const noexcept { return _mask_url; } + void set_mask_url(std::string mask_url) noexcept { _mask_url = mask_url; } + std::optional mask_replacement_url() const noexcept { return _mask_replacement_url; } + void 
set_mask_replacement_url(std::optional mask_replacement_url) noexcept { _mask_replacement_url = mask_replacement_url; } + std::optional blur_radius() const noexcept { return _blur_radius; } + void set_blur_radius(std::optional blur_radius) noexcept { _blur_radius = blur_radius; } + +protected: + virtual ~VideoMask() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + std::string _mask_type; + std::string _mask_url; + std::optional _mask_replacement_url; + std::optional _blur_radius; +}; + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/typeRegistry.cpp b/src/opentimelineio/typeRegistry.cpp index 7254079496..cb45c5f6ed 100644 --- a/src/opentimelineio/typeRegistry.cpp +++ b/src/opentimelineio/typeRegistry.cpp @@ -5,6 +5,7 @@ #include "anyDictionary.h" #include "opentimelineio/clip.h" +#include "opentimelineio/colorManagementEffects.h" #include "opentimelineio/composable.h" #include "opentimelineio/composition.h" #include "opentimelineio/effect.h" @@ -25,8 +26,10 @@ #include "opentimelineio/timeEffect.h" #include "opentimelineio/timeline.h" #include "opentimelineio/track.h" +#include "opentimelineio/transformEffects.h" #include "opentimelineio/transition.h" #include "opentimelineio/unknownSchema.h" +#include "opentimelineio/volumeEffects.h" #include "stringUtils.h" #include @@ -54,6 +57,9 @@ TypeRegistry::TypeRegistry() }, "UnknownSchema"); + register_type(); + register_type(); + register_type(); register_type(); register_type(); @@ -86,6 +92,20 @@ TypeRegistry::TypeRegistry() register_type(); register_type(); register_type_from_existing_type("Sequence", 1, "Track", nullptr); + + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); + register_type(); /* diff --git a/src/opentimelineio/volumeEffects.cpp 
b/src/opentimelineio/volumeEffects.cpp new file mode 100644 index 0000000000..70f260d1eb --- /dev/null +++ b/src/opentimelineio/volumeEffects.cpp @@ -0,0 +1,29 @@ +#include "opentimelineio/volumeEffects.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { + +bool AudioVolume::read_from(Reader& reader) { + return reader.read("gain", &_gain) + && Parent::read_from(reader); +} + +void AudioVolume::write_to(Writer &writer) const { + Parent::write_to(writer); + writer.write("gain", _gain); +} + +bool AudioFade::read_from(Reader& reader) { + return reader.read("fade_in", &_fade_in) + && reader.read("start_time", &_start_time) + && reader.read("duration", &_duration) + && Parent::read_from(reader); +} + +void AudioFade::write_to(Writer& writer) const { + Parent::write_to(writer); + writer.write("fade_in", _fade_in); + writer.write("start_time", _start_time); + writer.write("duration", _duration); +} + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/opentimelineio/volumeEffects.h b/src/opentimelineio/volumeEffects.h new file mode 100644 index 0000000000..f700fe5ddf --- /dev/null +++ b/src/opentimelineio/volumeEffects.h @@ -0,0 +1,99 @@ +#pragma once + +#include "opentimelineio/effect.h" +#include "opentimelineio/version.h" + +namespace opentimelineio { namespace OPENTIMELINEIO_VERSION { + +/// @brief Sets the audio volume +class AudioVolume : public Effect +{ +public: + /// @brief This struct provides the Effect schema. + struct Schema + { + static auto constexpr name = "AudioVolume"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new volume effect. + /// + /// @param name The name of the effect object. + /// @param gain Gain value + /// @param metadata The metadata for the effect. + /// @note This constructor takes no enabled parameter; the effect defaults to enabled. 
+ AudioVolume( + std::string const& name = std::string(), + double gain = 1.0, + AnyDictionary const& metadata = AnyDictionary()) + : Effect(name, Schema::name, metadata) + , _gain(gain) + {} + + double gain() const noexcept { return _gain; } + void set_gain(double gain) noexcept { _gain = gain; } + +protected: + + virtual ~AudioVolume() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + double _gain; ///< the gain +}; + +/// @brief Describes an audio fade effect +class AudioFade : public Effect +{ +public: + /// @brief This struct provides the Effect schema. + struct Schema + { + static auto constexpr name = "AudioFade"; + static int constexpr version = 1; + }; + + using Parent = Effect; + + /// @brief Create a new audio fade effect. + /// + /// @param name The name of the effect object. + /// @param fade_in Whether this is a fade-in (true) or fade-out (false). + /// @param start_time The start time of the fade in seconds. + /// @param duration Duration of the fade in seconds. + /// @param metadata The metadata for the effect. 
+ AudioFade( + std::string const& name = std::string(), + bool fade_in = true, + double start_time = 0.0, + double duration = 0.0, + AnyDictionary const& metadata = AnyDictionary()) + : Effect(name, Schema::name, metadata) + , _fade_in(fade_in) + , _start_time(start_time) + , _duration(duration) + {} + + bool fade_in() const noexcept { return _fade_in; } + void set_fade_in(bool fade_in) noexcept { _fade_in = fade_in; } + + double start_time() const noexcept { return _start_time; } + void set_start_time(double start_time) noexcept { _start_time = start_time; } + + double duration() const noexcept { return _duration; } + void set_duration(double duration) noexcept { _duration = duration; } + +protected: + + virtual ~AudioFade() = default; + bool read_from(Reader&) override; + void write_to(Writer&) const override; + + bool _fade_in; ///< true for fade-in, false for fade-out + double _start_time; ///< start time of the fade in seconds + double _duration; ///< duration of the fade in seconds +}; + +}} // namespace opentimelineio::OPENTIMELINEIO_VERSION diff --git a/src/py-opentimelineio/opentimelineio-bindings/otio_serializableObjects.cpp b/src/py-opentimelineio/opentimelineio-bindings/otio_serializableObjects.cpp index 0b91a1641d..96c2df81b7 100644 --- a/src/py-opentimelineio/opentimelineio-bindings/otio_serializableObjects.cpp +++ b/src/py-opentimelineio/opentimelineio-bindings/otio_serializableObjects.cpp @@ -24,10 +24,13 @@ #include "opentimelineio/timeEffect.h" #include "opentimelineio/timeline.h" #include "opentimelineio/track.h" +#include "opentimelineio/transformEffects.h" +#include "opentimelineio/colorManagementEffects.h" #include "opentimelineio/transition.h" #include "opentimelineio/serializableCollection.h" #include "opentimelineio/stack.h" #include "opentimelineio/unknownSchema.h" +#include "opentimelineio/volumeEffects.h" #include "otio_utils.h" #include "otio_anyDictionary.h" @@ -440,14 +443,14 @@ Contains a :class:`.MediaReference` and a trim on that 
media reference. "effects"_a = py::none(), "markers"_a = py::none(), "active_media_reference"_a = std::string(Clip::default_media_key)) - .def_property_readonly_static("DEFAULT_MEDIA_KEY",[](py::object /* self */) { - return Clip::default_media_key; + .def_property_readonly_static("DEFAULT_MEDIA_KEY",[](py::object /* self */) { + return Clip::default_media_key; }) .def_property("media_reference", &Clip::media_reference, &Clip::set_media_reference) - .def_property("active_media_reference_key", &Clip::active_media_reference_key, [](Clip* clip, std::string const& new_active_key) { - clip->set_active_media_reference_key(new_active_key, ErrorStatusHandler()); + .def_property("active_media_reference_key", &Clip::active_media_reference_key, [](Clip* clip, std::string const& new_active_key) { + clip->set_active_media_reference_key(new_active_key, ErrorStatusHandler()); }) - .def("media_references", &Clip::media_references) + .def("media_references", &Clip::media_references) .def("set_media_references", [](Clip* clip, Clip::MediaReferences const& media_references, std::string const& new_active_key) { clip->set_media_references(media_references, new_active_key, ErrorStatusHandler()); }); @@ -630,9 +633,10 @@ Should be subclassed (for example by :class:`.Track` and :class:`.Stack`), not u .def(py::init([](std::string name, std::optional> children, std::optional global_start_time, + std::optional canvas_size, py::object metadata) { auto composable_children = vector_or_default(children); - Timeline* t = new Timeline(name, global_start_time, + Timeline* t = new Timeline(name, global_start_time, canvas_size, py_to_any_dictionary(metadata)); if (!composable_children.empty()) t->tracks()->set_children(composable_children, ErrorStatusHandler()); @@ -641,8 +645,10 @@ Should be subclassed (for example by :class:`.Track` and :class:`.Stack`), not u py::arg_v("name"_a = std::string()), "tracks"_a = py::none(), "global_start_time"_a = std::nullopt, + "canvas_size"_a = std::nullopt, 
py::arg_v("metadata"_a = py::none())) .def_property("global_start_time", &Timeline::global_start_time, &Timeline::set_global_start_time) + .def_property("canvas_size", &Timeline::canvas_size, &Timeline::set_canvas_size) .def_property("tracks", &Timeline::tracks, &Timeline::set_tracks) .def("duration", [](Timeline* t) { return t->duration(ErrorStatusHandler()); @@ -657,7 +663,8 @@ Should be subclassed (for example by :class:`.Track` and :class:`.Stack`), not u }, "search_range"_a = std::nullopt, "shallow_search"_a = false) .def("find_children", [](Timeline* t, py::object descended_from_type, std::optional const& search_range, bool shallow_search) { return find_children(t, descended_from_type, search_range, shallow_search); - }, "descended_from_type"_a = py::none(), "search_range"_a = std::nullopt, "shallow_search"_a = false); + }, "descended_from_type"_a = py::none(), "search_range"_a = std::nullopt, "shallow_search"_a = false) + .def("invalidate_cache", &Timeline::invalidate_cache); } static void define_effects(py::module m) { @@ -707,6 +714,193 @@ Instead it affects the speed of the media displayed within that item. return new FreezeFrame(name, py_to_any_dictionary(metadata)); }), py::arg_v("name"_a = std::string()), py::arg_v("metadata"_a = py::none())); + + py::class_>(m, "VideoScale", py::dynamic_attr(), R"docstring( +An effect that scales video to the given dimensions. +)docstring") + .def(py::init([](std::string name, int64_t width, int64_t height, py::object metadata) { + return new VideoScale(name, width, height, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "width"_a = 0, + "height"_a = 0, + "metadata"_a = py::none()) + .def_property("width", &VideoScale::width, &VideoScale::set_width, "Width to scale to") + .def_property("height", &VideoScale::height, &VideoScale::set_height, "Height to scale to"); + + py::class_>(m, "VideoCrop", py::dynamic_attr(), R"docstring( +An effect that crops video by a given amount of pixels on each side. 
+)docstring") + .def(py::init([](std::string name, int64_t left, int64_t right, int64_t top, int64_t bottom, py::object metadata) { + return new VideoCrop(name, left, right, top, bottom, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "left"_a = 0, + "right"_a = 0, + "top"_a = 0, + "bottom"_a = 0, + "metadata"_a = py::none()) + .def_property("left", &VideoCrop::left, &VideoCrop::set_left) + .def_property("right", &VideoCrop::right, &VideoCrop::set_right) + .def_property("top", &VideoCrop::top, &VideoCrop::set_top) + .def_property("bottom", &VideoCrop::bottom, &VideoCrop::set_bottom); + + py::class_>(m, "VideoPosition", py::dynamic_attr(), R"docstring( +An effect that positions video by a given offset in the frame. +The position is the location of the top left of the image on the canvas +)docstring") + .def(py::init([](std::string name, int64_t x, int64_t y, py::object metadata) { + return new VideoPosition(name, x, y, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "x"_a = 0, + "y"_a = 0, + "metadata"_a = py::none()) + .def_property("x", &VideoPosition::x, &VideoPosition::set_x) + .def_property("y", &VideoPosition::y, &VideoPosition::set_y); + + py::class_>(m, "VideoRotate", py::dynamic_attr(), R"docstring( +An effect that rotates video by a given amount. +The rotation is specified in degrees clockwise. 
+)docstring") + .def(py::init([](std::string name, double rotation, py::object metadata) { + return new VideoRotate(name, rotation, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "angle"_a = 0.0, + "metadata"_a = py::none()) + .def_property("angle", &VideoRotate::angle, &VideoRotate::set_angle, "Rotation angle in degrees clockwise"); + + py::class_>(m, "VideoRoundedCorners", py::dynamic_attr(), R"docstring( +An effect that rounds the corners of a video +)docstring") + .def(py::init([](std::string name, int64_t radius, py::object metadata) { + return new VideoRoundedCorners(name, radius, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "radius"_a = 0, + "metadata"_a = py::none()) + .def_property("radius", &VideoRoundedCorners::radius, &VideoRoundedCorners::set_radius, "Radius of the corners"); + + py::class_>(m, "VideoFlip", py::dynamic_attr(), R"docstring( +An effect that flips video horizontally or vertically. +)docstring") + .def(py::init([](std::string name, bool flip_horizontally, bool flip_vertically, py::object metadata) { + return new VideoFlip(name, flip_horizontally, flip_vertically, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "flip_horizontally"_a = false, + "flip_vertically"_a = false, + "metadata"_a = py::none()) + .def_property("flip_horizontally", &VideoFlip::flip_horizontally, &VideoFlip::set_flip_horizontally) + .def_property("flip_vertically", &VideoFlip::flip_vertically, &VideoFlip::set_flip_vertically); + +auto video_mask_class = + py::class_>(m, "VideoMask", py::dynamic_attr(), R"docstring( +An effect that applies a mask to a video +)docstring"); +video_mask_class + .def(py::init([](std::string name, std::string const& mask_type, const std::string& mask_url, py::object metadata) { + return new VideoMask(name, mask_type, mask_url, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "mask_type"_a = VideoMask::MaskType::remove, + "mask_url"_a = std::string(), + 
"metadata"_a = py::none()) + .def_property("mask_type", &VideoMask::mask_type, &VideoMask::set_mask_type) + .def_property("mask_url", &VideoMask::mask_url, &VideoMask::set_mask_url) + .def_property("mask_replacement_url", &VideoMask::mask_replacement_url, &VideoMask::set_mask_replacement_url) + .def_property("blur_radius", &VideoMask::blur_radius, &VideoMask::set_blur_radius); + + py::class_(video_mask_class, "MaskType") + .def_property_readonly_static("REMOVE", [](py::object /* self */) { return VideoMask::MaskType::remove; }) + .def_property_readonly_static("REPLACE", [](py::object /* self */) { return VideoMask::MaskType::replace; }) + .def_property_readonly_static("BLUR", [](py::object /* self */) { return VideoMask::MaskType::blur; }); + + py::class_>(m, "VideoBrightness", py::dynamic_attr(), R"docstring( +An effect that adjusts video brightness. +)docstring") + .def(py::init([](std::string name, int64_t brightness, py::object metadata) { + return new VideoBrightness(name, brightness, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "brightness"_a = 0, + "metadata"_a = py::none()) + .def_property("brightness", &VideoBrightness::brightness, &VideoBrightness::set_brightness, "Brightness value"); + + py::class_>(m, "VideoContrast", py::dynamic_attr(), R"docstring( +An effect that adjusts video contrast. +)docstring") + .def(py::init([](std::string name, int64_t contrast, py::object metadata) { + return new VideoContrast(name, contrast, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "contrast"_a = 0, + "metadata"_a = py::none()) + .def_property("contrast", &VideoContrast::contrast, &VideoContrast::set_contrast, "Contrast value"); + + py::class_>(m, "VideoSaturation", py::dynamic_attr(), R"docstring( +An effect that adjusts video saturation. 
+)docstring") + .def(py::init([](std::string name, int64_t saturation, py::object metadata) { + return new VideoSaturation(name, saturation, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "saturation"_a = 0, + "metadata"_a = py::none()) + .def_property("saturation", &VideoSaturation::saturation, &VideoSaturation::set_saturation, "Saturation value"); + + py::class_>(m, "VideoLightness", py::dynamic_attr(), R"docstring( +An effect that adjusts video lightness. +)docstring") + .def(py::init([](std::string name, int64_t lightness, py::object metadata) { + return new VideoLightness(name, lightness, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "lightness"_a = 0, + "metadata"_a = py::none()) + .def_property("lightness", &VideoLightness::lightness, &VideoLightness::set_lightness, "Lightness value"); + + py::class_>(m, "VideoColorTemperature", py::dynamic_attr(), R"docstring( +An effect that adjusts video color temperature. +)docstring") + .def(py::init([](std::string name, int64_t temperature, py::object metadata) { + return new VideoColorTemperature(name, temperature, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "temperature"_a = 0, + "metadata"_a = py::none()) + .def_property("temperature", &VideoColorTemperature::temperature, &VideoColorTemperature::set_temperature, "Color temperature value"); + + + + py::class_>(m, "AudioVolume", py::dynamic_attr(), R"docstring( +An effect that multiplies the audio volume by a given gain value +)docstring") + .def(py::init([](std::string name, double gain, py::object metadata) { + return new AudioVolume(name, gain, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "gain"_a = 1.0, + "metadata"_a = py::none()) + .def_property("gain", &AudioVolume::gain, &AudioVolume::set_gain, "Gain multiplier"); + + py::class_>(m, "AudioFade", py::dynamic_attr(), R"docstring( +An effect that defines an audio fade. 
+If fade_in is true, audio is fading in from the start time for the duration +If fade_in is false, the audio is fading out from the start time for the duration +)docstring") + .def(py::init([](std::string name, bool fade_in, double start_time, double duration, py::object metadata) { + return new AudioFade(name, fade_in, start_time, duration, py_to_any_dictionary(metadata)); + }), + "name"_a = std::string(), + "fade_in"_a = true, + "start_time"_a = 0, + "duration"_a = 0, + "metadata"_a = py::none()) + .def_property("fade_in", &AudioFade::fade_in, &AudioFade::set_fade_in, "Fade direction") + .def_property("start_time", &AudioFade::start_time, &AudioFade::set_start_time, "Fade start time") + .def_property("duration", &AudioFade::duration, &AudioFade::set_duration, "Fade duration"); } static void define_media_references(py::module m) { @@ -723,7 +917,7 @@ static void define_media_references(py::module m) { "available_image_bounds"_a = std::nullopt) .def_property("available_range", &MediaReference::available_range, &MediaReference::set_available_range) - .def_property("available_image_bounds", &MediaReference::available_image_bounds, &MediaReference::set_available_image_bounds) + .def_property("available_image_bounds", &MediaReference::available_image_bounds, &MediaReference::set_available_image_bounds) .def_property_readonly("is_missing_reference", &MediaReference::is_missing_reference); py::class_ #include #include +#include #include @@ -154,12 +155,21 @@ main(int argc, char** argv) using namespace otio; static constexpr auto time_scalar = 1.5; + static constexpr auto width = 1920; + static constexpr auto height = 1280; SerializableObject::Retainer ltw(new LinearTimeWarp( LinearTimeWarp::Schema::name, LinearTimeWarp::Schema::name, time_scalar)); - std::vector effects = { ltw }; + + SerializableObject::Retainer vscl( + new VideoScale( + VideoScale::Schema::name, + 100, + 200)); + + std::vector effects = { ltw, vscl }; static constexpr auto red = Marker::Color::red; @@ 
-247,11 +257,15 @@ main(int argc, char** argv) clip->set_media_references({ { "cloud", ref4 } }, "cloud"); assertEqual(clip->media_reference(), ref4.value); - // basic test for an effect + // basic test for effects assertEqual(clip->effects().size(), effects.size()); auto effect = dynamic_cast( - clip->effects().front().value); + clip->effects()[0].value); assertEqual(effect->time_scalar(), time_scalar); + auto scale = dynamic_cast( + clip->effects()[1].value); + assertEqual(scale->width(), 100); + assertEqual(scale->height(), 200); // basic test for a marker assertEqual(clip->markers().size(), markers.size()); diff --git a/tests/test_color_management_effects.cpp b/tests/test_color_management_effects.cpp new file mode 100644 index 0000000000..e43ba1ecbe --- /dev/null +++ b/tests/test_color_management_effects.cpp @@ -0,0 +1,194 @@ +#include "utils.h" + +#include +#include +#include +#include + +namespace otime = opentime::OPENTIME_VERSION; +namespace otio = opentimelineio::OPENTIMELINEIO_VERSION; + +int +main(int argc, char** argv) +{ + Tests tests; + tests.add_test("test_color_management_effects_read", [] { + using namespace otio; + + otio::ErrorStatus status; + SerializableObject::Retainer<> so = + SerializableObject::from_json_string( + R"( + { + "OTIO_SCHEMA": "Clip.1", + "media_reference": { + "OTIO_SCHEMA": "ExternalReference.1", + "target_url": "unit_test_url", + "available_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 8 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 10 + } + } + }, + "effects": [ + { + "OTIO_SCHEMA": "VideoBrightness.1", + "name": "brightness", + "brightness": 50, + "effect_name": "VideoBrightness", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoContrast.1", + "name": "contrast", + "contrast": 20, + "effect_name": "VideoContrast", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoSaturation.1", + "name": "saturation", + 
"saturation": 70, + "effect_name": "VideoSaturation", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoLightness.1", + "name": "lightness", + "lightness": 10, + "effect_name": "VideoLightness", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoColorTemperature.1", + "name": "temperature", + "temperature": 6500, + "effect_name": "VideoColorTemperature", + "enabled": true + } + ] + })", + &status); + + if (is_error(status)) + throw std::invalid_argument(status.details); + + const Clip* clip = dynamic_cast(so.value); + assertNotNull(clip); + + auto effects = clip->effects(); + assertEqual(effects.size(), 5); + + auto video_brightness = dynamic_cast(effects[0].value); + assertNotNull(video_brightness); + assertEqual(video_brightness->brightness(), 50); + + auto video_contrast = dynamic_cast(effects[1].value); + assertNotNull(video_contrast); + assertEqual(video_contrast->contrast(), 20); + + auto video_saturation = dynamic_cast(effects[2].value); + assertNotNull(video_saturation); + assertEqual(video_saturation->saturation(), 70); + + auto video_lightness = dynamic_cast(effects[3].value); + assertNotNull(video_lightness); + assertEqual(video_lightness->lightness(), 10); + + auto video_temperature = dynamic_cast(effects[4].value); + assertNotNull(video_temperature); + assertEqual(video_temperature->temperature(), 6500); + }); + + tests.add_test("test_color_management_effects_write", [] { + using namespace otio; + + SerializableObject::Retainer clip(new otio::Clip( + "unit_clip", + new otio::ExternalReference("unit_test_url"), + std::nullopt, + otio::AnyDictionary(), + { new otio::VideoBrightness("brightness", 50), + new otio::VideoContrast("contrast", 20), + new otio::VideoSaturation("saturation", 70), + new otio::VideoLightness("lightness", 10), + new otio::VideoColorTemperature("temperature", 6500)})); + + auto json = clip.value->to_json_string(); + + std::string expected_json = R"({ + "OTIO_SCHEMA": "Clip.2", + "metadata": {}, + "name": "unit_clip", + 
"source_range": null, + "effects": [ + { + "OTIO_SCHEMA": "VideoBrightness.1", + "metadata": {}, + "name": "brightness", + "effect_name": "VideoBrightness", + "enabled": true, + "brightness": 50 + }, + { + "OTIO_SCHEMA": "VideoContrast.1", + "metadata": {}, + "name": "contrast", + "effect_name": "VideoContrast", + "enabled": true, + "contrast": 20 + }, + { + "OTIO_SCHEMA": "VideoSaturation.1", + "metadata": {}, + "name": "saturation", + "effect_name": "VideoSaturation", + "enabled": true, + "saturation": 70 + }, + { + "OTIO_SCHEMA": "VideoLightness.1", + "metadata": {}, + "name": "lightness", + "effect_name": "VideoLightness", + "enabled": true, + "lightness": 10 + }, + { + "OTIO_SCHEMA": "VideoColorTemperature.1", + "metadata": {}, + "name": "temperature", + "effect_name": "VideoColorTemperature", + "enabled": true, + "temperature": 6500 + } + ], + "markers": [], + "enabled": true, + "media_references": { + "DEFAULT_MEDIA": { + "OTIO_SCHEMA": "ExternalReference.1", + "metadata": {}, + "name": "", + "available_range": null, + "available_image_bounds": null, + "target_url": "unit_test_url" + } + }, + "active_media_reference_key": "DEFAULT_MEDIA" +})"; + + assertEqual(json, expected_json); + }); + + tests.run(argc, argv); + return 0; +} diff --git a/tests/test_color_management_effects.py b/tests/test_color_management_effects.py new file mode 100644 index 0000000000..235ccf1901 --- /dev/null +++ b/tests/test_color_management_effects.py @@ -0,0 +1,145 @@ +"""Color management effects class test harness.""" + +import unittest +import opentimelineio as otio +import opentimelineio.test_utils as otio_test_utils + +class VideoBrightnessTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + brightness = otio.schema.VideoBrightness( + name="BrightIt", + brightness=50, + metadata={"foo": "bar"} + ) + self.assertEqual(brightness.brightness, 50) + self.assertEqual(brightness.name, "BrightIt") + self.assertEqual(brightness.metadata, {"foo": 
"bar"}) + + def test_eq(self): + b1 = otio.schema.VideoBrightness(name="BrightIt", brightness=50, metadata={"foo": "bar"}) + b2 = otio.schema.VideoBrightness(name="BrightIt", brightness=50, metadata={"foo": "bar"}) + self.assertIsOTIOEquivalentTo(b1, b2) + + def test_serialize(self): + brightness = otio.schema.VideoBrightness(name="BrightIt", brightness=50, metadata={"foo": "bar"}) + encoded = otio.adapters.otio_json.write_to_string(brightness) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(brightness, decoded) + + def test_setters(self): + brightness = otio.schema.VideoBrightness(name="BrightIt", brightness=50, metadata={"foo": "bar"}) + self.assertEqual(brightness.brightness, 50) + brightness.brightness = 100 + self.assertEqual(brightness.brightness, 100) + +class VideoContrastTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + contrast = otio.schema.VideoContrast( + name="ContrastIt", + contrast=20, + metadata={"foo": "bar"} + ) + self.assertEqual(contrast.contrast, 20) + self.assertEqual(contrast.name, "ContrastIt") + self.assertEqual(contrast.metadata, {"foo": "bar"}) + + def test_eq(self): + c1 = otio.schema.VideoContrast(name="ContrastIt", contrast=20, metadata={"foo": "bar"}) + c2 = otio.schema.VideoContrast(name="ContrastIt", contrast=20, metadata={"foo": "bar"}) + self.assertIsOTIOEquivalentTo(c1, c2) + + def test_serialize(self): + contrast = otio.schema.VideoContrast(name="ContrastIt", contrast=20, metadata={"foo": "bar"}) + encoded = otio.adapters.otio_json.write_to_string(contrast) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(contrast, decoded) + + def test_setters(self): + contrast = otio.schema.VideoContrast(name="ContrastIt", contrast=20, metadata={"foo": "bar"}) + self.assertEqual(contrast.contrast, 20) + contrast.contrast = 40 + self.assertEqual(contrast.contrast, 40) + +class 
VideoSaturationTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + saturation = otio.schema.VideoSaturation( + name="SaturateIt", + saturation=70, + metadata={"foo": "bar"} + ) + self.assertEqual(saturation.saturation, 70) + self.assertEqual(saturation.name, "SaturateIt") + self.assertEqual(saturation.metadata, {"foo": "bar"}) + + def test_eq(self): + s1 = otio.schema.VideoSaturation(name="SaturateIt", saturation=70, metadata={"foo": "bar"}) + s2 = otio.schema.VideoSaturation(name="SaturateIt", saturation=70, metadata={"foo": "bar"}) + self.assertIsOTIOEquivalentTo(s1, s2) + + def test_serialize(self): + saturation = otio.schema.VideoSaturation(name="SaturateIt", saturation=70, metadata={"foo": "bar"}) + encoded = otio.adapters.otio_json.write_to_string(saturation) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(saturation, decoded) + + def test_setters(self): + saturation = otio.schema.VideoSaturation(name="SaturateIt", saturation=70, metadata={"foo": "bar"}) + self.assertEqual(saturation.saturation, 70) + saturation.saturation = 100 + self.assertEqual(saturation.saturation, 100) + +class VideoLightnessTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + lightness = otio.schema.VideoLightness( + name="LightIt", + lightness=10, + metadata={"foo": "bar"} + ) + self.assertEqual(lightness.lightness, 10) + self.assertEqual(lightness.name, "LightIt") + self.assertEqual(lightness.metadata, {"foo": "bar"}) + + def test_eq(self): + l1 = otio.schema.VideoLightness(name="LightIt", lightness=10, metadata={"foo": "bar"}) + l2 = otio.schema.VideoLightness(name="LightIt", lightness=10, metadata={"foo": "bar"}) + self.assertIsOTIOEquivalentTo(l1, l2) + + def test_serialize(self): + lightness = otio.schema.VideoLightness(name="LightIt", lightness=10, metadata={"foo": "bar"}) + encoded = otio.adapters.otio_json.write_to_string(lightness) + decoded = 
otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(lightness, decoded) + + def test_setters(self): + lightness = otio.schema.VideoLightness(name="LightIt", lightness=10, metadata={"foo": "bar"}) + self.assertEqual(lightness.lightness, 10) + lightness.lightness = 20 + self.assertEqual(lightness.lightness, 20) + +class VideoColorTemperatureTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + temp = otio.schema.VideoColorTemperature( + name="TempIt", + temperature=6500, + metadata={"foo": "bar"} + ) + self.assertEqual(temp.temperature, 6500) + self.assertEqual(temp.name, "TempIt") + self.assertEqual(temp.metadata, {"foo": "bar"}) + + def test_eq(self): + t1 = otio.schema.VideoColorTemperature(name="TempIt", temperature=6500, metadata={"foo": "bar"}) + t2 = otio.schema.VideoColorTemperature(name="TempIt", temperature=6500, metadata={"foo": "bar"}) + self.assertIsOTIOEquivalentTo(t1, t2) + + def test_serialize(self): + temp = otio.schema.VideoColorTemperature(name="TempIt", temperature=6500, metadata={"foo": "bar"}) + encoded = otio.adapters.otio_json.write_to_string(temp) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(temp, decoded) + + def test_setters(self): + temp = otio.schema.VideoColorTemperature(name="TempIt", temperature=6500, metadata={"foo": "bar"}) + self.assertEqual(temp.temperature, 6500) + temp.temperature = 7000 + self.assertEqual(temp.temperature, 7000) diff --git a/tests/test_composition.py b/tests/test_composition.py index 40d4e9f4be..7c6c88c19d 100755 --- a/tests/test_composition.py +++ b/tests/test_composition.py @@ -1895,6 +1895,7 @@ def _nest(self, item): wrapper = _nest(self, clip) wrappers.append(wrapper) + timeline.invalidate_cache() # nothing should have shifted at all # print otio.adapters.otio_json.write_to_string(timeline) @@ -1928,6 +1929,8 @@ def _nest(self, item): # print otio.adapters.otio_json.write_to_string(timeline) + 
timeline.invalidate_cache() + # the clip should be the same self.assertEqual(clip.duration(), onehundred) diff --git a/tests/test_console.py b/tests/test_console.py index 845abb7207..7cf47a9018 100755 --- a/tests/test_console.py +++ b/tests/test_console.py @@ -111,7 +111,7 @@ class OTIOStatTest(ConsoleTester, unittest.TestCase): def test_basic(self): sys.argv = ['otiostat', SCREENING_EXAMPLE_PATH] self.run_test() - self.assertIn("top level object: Timeline.1", sys.stdout.getvalue()) + self.assertIn("top level object: Timeline.2", sys.stdout.getvalue()) OTIOStatTest_ShellOut = CreateShelloutTest(OTIOStatTest) diff --git a/tests/test_serialization.cpp b/tests/test_serialization.cpp index 53c352b2ca..e0c55f9477 100644 --- a/tests/test_serialization.cpp +++ b/tests/test_serialization.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -13,6 +14,7 @@ #include #include +#include namespace otime = opentime::OPENTIME_VERSION; namespace otio = opentimelineio::OPENTIMELINEIO_VERSION; @@ -26,6 +28,10 @@ main(int argc, char** argv) "success with default indent", [] { otio::SerializableObject::Retainer cl = new otio::Clip(); + + otio::SerializableObject::Retainer vs = + new otio::VideoScale("scale", 1920, 1280); + cl->effects().push_back(vs); otio::SerializableObject::Retainer tr = new otio::Track(); tr->append_child(cl); @@ -37,10 +43,11 @@ main(int argc, char** argv) auto output = tl.value->to_json_string(&err, {}); assertFalse(otio::is_error(err)); assertEqual(output.c_str(), R"CONTENT({ - "OTIO_SCHEMA": "Timeline.1", + "OTIO_SCHEMA": "Timeline.2", "metadata": {}, "name": "", "global_start_time": null, + "canvas_size": null, "tracks": { "OTIO_SCHEMA": "Stack.1", "metadata": {}, @@ -64,7 +71,17 @@ main(int argc, char** argv) "metadata": {}, "name": "", "source_range": null, - "effects": [], + "effects": [ + { + "OTIO_SCHEMA": "VideoScale.1", + "metadata": {}, + "name": "scale", + "effect_name": "VideoScale", + "enabled": true, + "width": 
1920, + "height": 1280 + } + ], "markers": [], "enabled": true, "media_references": { diff --git a/tests/test_timeline_algo.py b/tests/test_timeline_algo.py index 8153b5c2ea..a5bac5cdaf 100644 --- a/tests/test_timeline_algo.py +++ b/tests/test_timeline_algo.py @@ -18,7 +18,8 @@ def make_sample_timeline(self): result = otio.adapters.read_from_string( """ { - "OTIO_SCHEMA": "Timeline.1", + "OTIO_SCHEMA": "Timeline.2", + "canvas_size": null, "metadata": {}, "name": null, "tracks": { @@ -264,7 +265,8 @@ def test_trim_with_transitions(self): expected = otio.adapters.read_from_string( """ { - "OTIO_SCHEMA": "Timeline.1", + "OTIO_SCHEMA": "Timeline.2", + "canvas_size": null, "metadata": {}, "name": null, "tracks": { diff --git a/tests/test_transform_effects.cpp b/tests/test_transform_effects.cpp new file mode 100644 index 0000000000..98b42d417c --- /dev/null +++ b/tests/test_transform_effects.cpp @@ -0,0 +1,288 @@ +#include "utils.h" + +#include +#include +#include +#include + +namespace otime = opentime::OPENTIME_VERSION; +namespace otio = opentimelineio::OPENTIMELINEIO_VERSION; + +int +main(int argc, char** argv) +{ + Tests tests; + tests.add_test("test_video_transform_read", [] { + using namespace otio; + + otio::ErrorStatus status; + SerializableObject::Retainer<> so = + SerializableObject::from_json_string( + R"( + { + "OTIO_SCHEMA": "Clip.1", + "media_reference": { + "OTIO_SCHEMA": "ExternalReference.1", + "target_url": "unit_test_url", + "available_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 8 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 10 + } + } + }, + "effects": [ + { + "OTIO_SCHEMA": "VideoScale.1", + "name": "scale", + "width": 100, + "height": 120, + "effect_name": "VideoScale", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoPosition.1", + "name": "position", + "x": 10, + "y": 20, + "effect_name": "VideoPosition", + "enabled": true + }, + { + 
"OTIO_SCHEMA": "VideoRotate.1", + "name": "rotate", + "angle": 45.5, + "effect_name": "VideoRotate", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoCrop.1", + "name": "crop", + "left": 5, + "right": 6, + "top": 7, + "bottom": 8, + "effect_name": "VideoCrop", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoRoundedCorners.1", + "name": "roundedCorners", + "radius": 80, + "effect_name": "VideoRoundedCorners", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoFlip.1", + "name": "flip", + "flip_horizontally": true, + "flip_vertically": false, + "effect_name": "VideoFlip", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoMask.1", + "name": "mask", + "mask_type": "REMOVE", + "mask_url": "mask_url", + "effect_name": "VideoMaskRemove", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoMask.1", + "name": "mask", + "mask_type": "REPLACE", + "mask_url": "mask_url", + "effect_name": "VideoMaskReplace", + "mask_replacement_url": "mask_replacement_url", + "enabled": true + }, + { + "OTIO_SCHEMA": "VideoMask.1", + "name": "mask", + "mask_type": "BLUR", + "mask_url": "mask_url", + "effect_name": "VideoMaskBlur", + "blur_radius": 10.1, + "enabled": true + } + ] + })", + &status); + + if (is_error(status)) + throw std::invalid_argument(status.details); + + const Clip* clip = dynamic_cast(so.value); + assertNotNull(clip); + + auto effects = clip->effects(); + assertEqual(effects.size(), 9); + + auto video_scale = dynamic_cast(effects[0].value); + assertNotNull(video_scale); + assertEqual(video_scale->width(), 100); + assertEqual(video_scale->height(), 120); + + auto video_position = dynamic_cast(effects[1].value); + assertNotNull(video_position); + assertEqual(video_position->x(), 10); + assertEqual(video_position->y(), 20); + + auto video_rotate = dynamic_cast(effects[2].value); + assertNotNull(video_rotate); + assertEqual(video_rotate->angle(), 45.5); + + auto video_crop = dynamic_cast(effects[3].value); + assertNotNull(video_crop); + assertEqual(video_crop->left(), 5); + 
assertEqual(video_crop->right(), 6); + assertEqual(video_crop->top(), 7); + assertEqual(video_crop->bottom(), 8); + + auto video_rounded_corners = dynamic_cast(effects[4].value); + assertNotNull(video_rounded_corners); + assertEqual(video_rounded_corners->radius(), 80); + + auto video_flip = dynamic_cast(effects[5].value); + assertNotNull(video_flip); + assertEqual(video_flip->flip_horizontally(), true); + assertEqual(video_flip->flip_vertically(), false); + + auto video_mask_remove = dynamic_cast(effects[6].value); + assertNotNull(video_mask_remove); + assertEqual(video_mask_remove->mask_type(), std::string(VideoMask::MaskType::remove)); + assertEqual(video_mask_remove->mask_url(), std::string("mask_url")); + + auto video_mask_replace = dynamic_cast(effects[7].value); + assertNotNull(video_mask_replace); + assertEqual(video_mask_replace->mask_type(), std::string(VideoMask::MaskType::replace)); + assertEqual(video_mask_replace->mask_url(), std::string("mask_url")); + assertEqual(video_mask_replace->mask_replacement_url().value(), std::string("mask_replacement_url")); + + auto video_mask_blur = dynamic_cast(effects[8].value); + assertNotNull(video_mask_blur); + assertEqual(video_mask_blur->mask_type(), std::string(VideoMask::MaskType::blur)); + assertEqual(video_mask_blur->mask_url(), std::string("mask_url")); + assertEqual(video_mask_blur->blur_radius().value(), 10.1); + }); + + tests.add_test("test_video_transform_write", [] { + using namespace otio; + + SerializableObject::Retainer clip(new otio::Clip( + "unit_clip", + new otio::ExternalReference("unit_test_url"), + std::nullopt, + otio::AnyDictionary(), + { new otio::VideoScale("scale", 100, 120), + new otio::VideoPosition("position", 10, 20), + new otio::VideoRotate("rotate", 40.5), + new otio::VideoCrop("crop", 1, 2, 3, 4), + new otio::VideoRoundedCorners("roundedCorners",80), + new otio::VideoFlip("flip", true, false), + new otio::VideoMask("mask", otio::VideoMask::MaskType::remove, "mask_url") + })); + + 
auto json = clip.value->to_json_string(); + + std::string expected_json = R"({ + "OTIO_SCHEMA": "Clip.2", + "metadata": {}, + "name": "unit_clip", + "source_range": null, + "effects": [ + { + "OTIO_SCHEMA": "VideoScale.1", + "metadata": {}, + "name": "scale", + "effect_name": "VideoScale", + "enabled": true, + "width": 100, + "height": 120 + }, + { + "OTIO_SCHEMA": "VideoPosition.1", + "metadata": {}, + "name": "position", + "effect_name": "VideoPosition", + "enabled": true, + "x": 10, + "y": 20 + }, + { + "OTIO_SCHEMA": "VideoRotate.1", + "metadata": {}, + "name": "rotate", + "effect_name": "VideoRotate", + "enabled": true, + "angle": 40.5 + }, + { + "OTIO_SCHEMA": "VideoCrop.1", + "metadata": {}, + "name": "crop", + "effect_name": "VideoCrop", + "enabled": true, + "left": 1, + "right": 2, + "top": 3, + "bottom": 4 + }, + { + "OTIO_SCHEMA": "VideoRoundedCorners.1", + "metadata": {}, + "name": "roundedCorners", + "effect_name": "VideoRoundedCorners", + "enabled": true, + "radius": 80 + }, + { + "OTIO_SCHEMA": "VideoFlip.1", + "metadata": {}, + "name": "flip", + "effect_name": "VideoFlip", + "enabled": true, + "flip_horizontally": true, + "flip_vertically": false + }, + { + "OTIO_SCHEMA": "VideoMask.1", + "metadata": {}, + "name": "mask", + "effect_name": "VideoMask", + "enabled": true, + "mask_type": "REMOVE", + "mask_url": "mask_url" + } + ], + "markers": [], + "enabled": true, + "media_references": { + "DEFAULT_MEDIA": { + "OTIO_SCHEMA": "ExternalReference.1", + "metadata": {}, + "name": "", + "available_range": null, + "available_image_bounds": null, + "target_url": "unit_test_url" + } + }, + "active_media_reference_key": "DEFAULT_MEDIA" +})"; + + + assertEqual(json, expected_json); + + }); + + tests.run(argc, argv); + return 0; +} diff --git a/tests/test_transform_effects.py b/tests/test_transform_effects.py new file mode 100644 index 0000000000..ae02913e7c --- /dev/null +++ b/tests/test_transform_effects.py @@ -0,0 +1,451 @@ +"""Transform effects class test 
harness.""" + +import unittest +from fractions import Fraction + +import opentimelineio as otio +import opentimelineio.test_utils as otio_test_utils + +class VideoScaleTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + scale = otio.schema.VideoScale( + name="ScaleIt", + width=100, + height=120, + metadata={ + "foo": "bar" + } + ) + self.assertEqual(scale.width, 100) + self.assertEqual(scale.height, 120) + self.assertEqual(scale.name, "ScaleIt") + self.assertEqual(scale.metadata, {"foo": "bar"}) + + def test_eq(self): + scale1 = otio.schema.VideoScale( + name="ScaleIt", + width=120, + height=130, + metadata={ + "foo": "bar" + } + ) + scale2 = otio.schema.VideoScale( + name="ScaleIt", + width=120, + height=130, + metadata={ + "foo": "bar" + } + ) + self.assertIsOTIOEquivalentTo(scale1, scale2) + + def test_serialize(self): + scale = otio.schema.VideoScale( + name="ScaleIt", + width=130, + height=140, + metadata={ + "foo": "bar" + } + ) + encoded = otio.adapters.otio_json.write_to_string(scale) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(scale, decoded) + + def test_setters(self): + scale = otio.schema.VideoScale( + name="ScaleIt", + width=140, + height=150, + metadata={ + "foo": "bar" + } + ) + self.assertEqual(scale.width, 140) + scale.width = 100 + self.assertEqual(scale.width,100) + self.assertEqual(scale.height, 150) + scale.height = 100 + self.assertEqual(scale.height,100) + +class VideoCropTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_cons(self): + crop = otio.schema.VideoCrop( + name="CropIt", + left=2, + right=3, + top=4, + bottom=5, + metadata={ + "baz": "qux" + } + ) + self.assertEqual(crop.left, 2) + self.assertEqual(crop.right, 3) + self.assertEqual(crop.top, 4) + self.assertEqual(crop.bottom, 5) + self.assertEqual(crop.name, "CropIt") + self.assertEqual(crop.metadata, {"baz": "qux"}) + + def test_eq(self): + crop1 = otio.schema.VideoCrop( + 
name="CropIt", + left=2, + right=3, + top=4, + bottom=5, + metadata={ + "baz": "qux" + } + ) + crop2 = otio.schema.VideoCrop( + name="CropIt", + left=2, + right=3, + top=4, + bottom=5, + metadata={ + "baz": "qux" + } + ) + self.assertIsOTIOEquivalentTo(crop1, crop2) + + def test_serialize(self): + crop = otio.schema.VideoCrop( + name="CropIt", + left=2, + right=3, + top=4, + bottom=5, + metadata={ + "baz": "qux" + } + ) + encoded = otio.adapters.otio_json.write_to_string(crop) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(crop, decoded) + + def test_setters(self): + crop = otio.schema.VideoCrop( + name="CropIt", + left=2, + right=3, + top=4, + bottom=5, + metadata={ + "baz": "qux" + } + ) + self.assertEqual(crop.left, 2) + crop.left = 1 + self.assertEqual(crop.left, 1) + crop.right = 3 + self.assertEqual(crop.right, 3) + crop.top = 4 + self.assertEqual(crop.top, 4) + crop.bottom = 7 + self.assertEqual(crop.bottom, 7) + +class VideoPositionTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + position = otio.schema.VideoPosition( + name="PositionIt", + x=11, + y=12, + metadata={ + "alpha": "beta" + } + ) + self.assertEqual(position.x, 11) + self.assertEqual(position.y, 12) + self.assertEqual(position.name, "PositionIt") + self.assertEqual(position.metadata, {"alpha": "beta"}) + + def test_eq(self): + pos1 = otio.schema.VideoPosition( + name="PositionIt", + x=11, + y=12, + metadata={ + "alpha": "beta" + } + ) + pos2 = otio.schema.VideoPosition( + name="PositionIt", + x=11, + y=12, + metadata={ + "alpha": "beta" + } + ) + self.assertIsOTIOEquivalentTo(pos1, pos2) + + def test_serialize(self): + position = otio.schema.VideoPosition( + name="PositionIt", + x=11, + y=12, + metadata={ + "alpha": "beta" + } + ) + encoded = otio.adapters.otio_json.write_to_string(position) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(position, decoded) + + def 
test_setters(self): + position = otio.schema.VideoPosition( + name="PositionIt", + x=11, + y=12, + metadata={ + "alpha": "beta" + } + ) + self.assertEqual(position.x, 11) + position.x = 1 + self.assertEqual(position.x, 1) + self.assertEqual(position.y, 12) + position.y = 2 + self.assertEqual(position.y, 2) + +class VideoRotateTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + rotate = otio.schema.VideoRotate( + name="RotateIt", + angle=45.25, + metadata={ + "rot": "val" + } + ) + self.assertEqual(rotate.angle,45.25) + self.assertEqual(rotate.name, "RotateIt") + self.assertEqual(rotate.metadata, {"rot": "val"}) + + def test_eq(self): + rot1 = otio.schema.VideoRotate( + name="RotateIt", + angle=45.25, + metadata={ + "rot": "val" + } + ) + rot2 = otio.schema.VideoRotate( + name="RotateIt", + angle=45.25, + metadata={ + "rot": "val" + } + ) + self.assertIsOTIOEquivalentTo(rot1, rot2) + + def test_serialize(self): + rotate = otio.schema.VideoRotate( + name="RotateIt", + angle=45.25, + metadata={ + "rot": "val" + } + ) + encoded = otio.adapters.otio_json.write_to_string(rotate) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(rotate, decoded) + + def test_setters(self): + rotate = otio.schema.VideoRotate( + name="RotateIt", + angle=45.25, + metadata={ + "rot": "val" + } + ) + self.assertEqual(rotate.angle, 45.25) + rotate.angle = 90.0 + self.assertEqual(rotate.angle, 90.0) + +class VideoRoundedCornersTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + rounded_corners = otio.schema.VideoRoundedCorners( + name="doRoundCorners", + radius=10, + metadata={ + "round": "corners" + } + ) + self.assertEqual(rounded_corners.radius, 10) + self.assertEqual(rounded_corners.name, "doRoundCorners") + self.assertEqual(rounded_corners.metadata, {"round": "corners"}) + + def test_eq(self): + rounded_corners1 = otio.schema.VideoRoundedCorners( + name="doRoundCorners", + 
radius=10, + metadata={ + "round": "corners" + } + ) + rounded_corners2 = otio.schema.VideoRoundedCorners( + name="doRoundCorners", + radius=10, + metadata={ + "round": "corners" + } + ) + self.assertIsOTIOEquivalentTo(rounded_corners1, rounded_corners2) + + def test_serialize(self): + rounded_corners = otio.schema.VideoRoundedCorners( + name="doRoundCorners", + radius=10, + metadata={ + "round": "corners" + } + ) + encoded = otio.adapters.otio_json.write_to_string(rounded_corners) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(rounded_corners, decoded) + + def test_setters(self): + rounded_corners = otio.schema.VideoRoundedCorners( + name="doRoundCorners", + radius=10, + metadata={ + "round": "corners" + } + ) + self.assertEqual(rounded_corners.radius, 10) + rounded_corners.radius = 20 + self.assertEqual(rounded_corners.radius, 20) + +class VideoFlipTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + flip = otio.schema.VideoFlip( + name="FlipIt", + flip_horizontally=True, + flip_vertically=False, + metadata={ + "flip": "val" + } + ) + self.assertEqual(flip.flip_horizontally, True) + self.assertEqual(flip.flip_vertically, False) + self.assertEqual(flip.name, "FlipIt") + self.assertEqual(flip.metadata, {"flip": "val"}) + + def test_eq(self): + flip1 = otio.schema.VideoFlip( + name="FlipIt", + flip_horizontally=True, + flip_vertically=False, + metadata={ + "flip": "val" + } + ) + flip2 = otio.schema.VideoFlip( + name="FlipIt", + flip_horizontally=True, + flip_vertically=False, + metadata={ + "flip": "val" + } + ) + self.assertIsOTIOEquivalentTo(flip1, flip2) + + def test_serialize(self): + flip = otio.schema.VideoFlip( + name="FlipIt", + flip_horizontally=True, + flip_vertically=False, + metadata={ + "flip": "val" + } + ) + encoded = otio.adapters.otio_json.write_to_string(flip) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(flip, decoded) + + 
def test_setters(self): + flip = otio.schema.VideoFlip( + name="FlipIt", + flip_horizontally=True, + flip_vertically=False, + metadata={ + "flip": "val" + } + ) + self.assertEqual(flip.flip_horizontally, True) + flip.flip_horizontally = False + self.assertEqual(flip.flip_horizontally, False) + self.assertEqual(flip.flip_vertically, False) + flip.flip_vertically = True + self.assertEqual(flip.flip_vertically, True) + +class VideoMaskTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + mask = otio.schema.VideoMask( + name="MaskIt", + mask_type=otio.schema.MaskType.REMOVE, + mask_url="mask_url", + metadata={ + "mask": "val" + } + ) + self.assertEqual(mask.mask_type, otio.schema.MaskType.REMOVE) + self.assertEqual(mask.mask_url, "mask_url") + self.assertEqual(mask.name, "MaskIt") + self.assertEqual(mask.metadata, {"mask": "val"}) + + def test_eq(self): + mask1 = otio.schema.VideoMask( + name="MaskIt", + mask_type=otio.schema.MaskType.REMOVE, + mask_url="mask_url", + metadata={ + "mask": "val" + } + ) + mask2 = otio.schema.VideoMask( + name="MaskIt", + mask_type=otio.schema.MaskType.REMOVE, + mask_url="mask_url", + metadata={ + "mask": "val" + } + ) + self.assertIsOTIOEquivalentTo(mask1, mask2) + + def test_serialize(self): + mask = otio.schema.VideoMask( + name="MaskIt", + mask_type=otio.schema.MaskType.REMOVE, + mask_url="mask_url", + metadata={ + "mask": "val" + } + ) + encoded = otio.adapters.otio_json.write_to_string(mask) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(mask, decoded) + + def test_setters(self): + mask = otio.schema.VideoMask( + name="MaskIt", + mask_type=otio.schema.MaskType.REMOVE, + mask_url="mask_url", + metadata={ + "mask": "val" + } + ) + self.assertEqual(mask.mask_type, otio.schema.MaskType.REMOVE) + mask.mask_type = otio.schema.MaskType.REPLACE + self.assertEqual(mask.mask_type, otio.schema.MaskType.REPLACE) + mask.mask_url = "mask_url_2" + 
self.assertEqual(mask.mask_url, "mask_url_2") + mask.mask_replacement_url = "mask_replacement_url" + self.assertEqual(mask.mask_replacement_url, "mask_replacement_url") + mask.blur_radius = 10.0 + self.assertEqual(mask.blur_radius, 10.0) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/tests/test_volume_effects.cpp b/tests/test_volume_effects.cpp new file mode 100644 index 0000000000..c7d40d511c --- /dev/null +++ b/tests/test_volume_effects.cpp @@ -0,0 +1,140 @@ +#include "utils.h" + +#include +#include +#include +#include + +namespace otime = opentime::OPENTIME_VERSION; +namespace otio = opentimelineio::OPENTIMELINEIO_VERSION; + +int +main(int argc, char** argv) +{ + Tests tests; + tests.add_test("test_audio_volume_read", [] { + using namespace otio; + + otio::ErrorStatus status; + SerializableObject::Retainer<> so = + SerializableObject::from_json_string( + R"( + { + "OTIO_SCHEMA": "Clip.1", + "media_reference": { + "OTIO_SCHEMA": "ExternalReference.1", + "target_url": "unit_test_url", + "available_range": { + "OTIO_SCHEMA": "TimeRange.1", + "duration": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 8 + }, + "start_time": { + "OTIO_SCHEMA": "RationalTime.1", + "rate": 24, + "value": 10 + } + } + }, + "effects": [ + { + "OTIO_SCHEMA": "AudioVolume.1", + "name": "volume", + "gain": 0.5, + "effect_name": "AudioVolume", + "enabled": true + }, + { + "OTIO_SCHEMA": "AudioFade.1", + "name": "fade", + "fade_in": false, + "start_time": 1.5, + "duration": 5.0, + "effect_name": "AudioFade", + "enabled": true + } + ] + })", + &status); + + assertFalse(is_error(status)); + + const Clip* clip = dynamic_cast(so.value); + assertNotNull(clip); + + auto effects = clip->effects(); + assertEqual(effects.size(), 2); + + auto audio_volume = dynamic_cast(effects[0].value); + assertNotNull(audio_volume); + assertEqual(audio_volume->gain(), 0.5); + + auto audio_fade = dynamic_cast(effects[1].value); + assertNotNull(audio_fade); + 
assertEqual(audio_fade->fade_in(), false); + assertEqual(audio_fade->start_time(), 1.5); + assertEqual(audio_fade->duration(), 5.0); + }); + + tests.add_test("test_audio_volume_write", [] { + using namespace otio; + + SerializableObject::Retainer clip(new otio::Clip( + "unit_clip", + new otio::ExternalReference("unit_test_url"), + std::nullopt, + otio::AnyDictionary(), + { new otio::AudioVolume("volume", 0.75), + new otio::AudioFade("fade", true, 2.0, 10.5)})); + + auto json = clip.value->to_json_string(); + + std::string expected_json = R"({ + "OTIO_SCHEMA": "Clip.2", + "metadata": {}, + "name": "unit_clip", + "source_range": null, + "effects": [ + { + "OTIO_SCHEMA": "AudioVolume.1", + "metadata": {}, + "name": "volume", + "effect_name": "AudioVolume", + "enabled": true, + "gain": 0.75 + }, + { + "OTIO_SCHEMA": "AudioFade.1", + "metadata": {}, + "name": "fade", + "effect_name": "AudioFade", + "enabled": true, + "fade_in": true, + "start_time": 2.0, + "duration": 10.5 + } + ], + "markers": [], + "enabled": true, + "media_references": { + "DEFAULT_MEDIA": { + "OTIO_SCHEMA": "ExternalReference.1", + "metadata": {}, + "name": "", + "available_range": null, + "available_image_bounds": null, + "target_url": "unit_test_url" + } + }, + "active_media_reference_key": "DEFAULT_MEDIA" +})"; + + assertEqual(json, expected_json); + + }); + + tests.run(argc, argv); + return 0; +} diff --git a/tests/test_volume_effects.py b/tests/test_volume_effects.py new file mode 100644 index 0000000000..240254c7ad --- /dev/null +++ b/tests/test_volume_effects.py @@ -0,0 +1,122 @@ +"""Volume effects class test harness.""" + +import unittest + +import opentimelineio as otio +import opentimelineio.test_utils as otio_test_utils + +class AudioVolumeTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + scale = otio.schema.AudioVolume( + name="volume", + gain=2.5, + metadata={ + "foo": "bar" + } + ) + self.assertEqual(scale.gain, 2.5) + 
self.assertEqual(scale.name, "volume") + self.assertEqual(scale.metadata, {"foo": "bar"}) + + def test_eq(self): + scale1 = otio.schema.AudioVolume( + name="volume", + gain=2.5, + metadata={ + "foo": "bar" + } + ) + scale2 = otio.schema.AudioVolume( + name="volume", + gain=2.5, + metadata={ + "foo": "bar" + } + ) + self.assertIsOTIOEquivalentTo(scale1, scale2) + + def test_serialize(self): + scale = otio.schema.AudioVolume( + name="volume", + gain=0.6, + metadata={ + "foo": "bar" + } + ) + encoded = otio.adapters.otio_json.write_to_string(scale) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(scale, decoded) + + def test_setters(self): + scale = otio.schema.AudioVolume( + name="volume", + gain=0.8, + metadata={ + "foo": "bar" + } + ) + self.assertEqual(scale.gain, 0.8) + scale.gain = 0.25 + self.assertEqual(scale.gain, 0.25) + +class AudioFadeTests(unittest.TestCase, otio_test_utils.OTIOAssertions): + def test_constructor(self): + fade = otio.schema.AudioFade( + name="fade", + fade_in=True, + start_time=12.0, + duration=8.0, + metadata={"baz": "qux"} + ) + self.assertEqual(fade.name, "fade") + self.assertEqual(fade.fade_in, True) + self.assertEqual(fade.start_time, 12.0) + self.assertEqual(fade.duration, 8.0) + self.assertEqual(fade.metadata, {"baz": "qux"}) + + def test_eq(self): + fade1 = otio.schema.AudioFade( + name="fade", + fade_in=False, + start_time=5.0, + duration=3.0, + metadata={"baz": "qux"} + ) + fade2 = otio.schema.AudioFade( + name="fade", + fade_in=False, + start_time=5.0, + duration=3.0, + metadata={"baz": "qux"} + ) + self.assertIsOTIOEquivalentTo(fade1, fade2) + + def test_serialize(self): + fade = otio.schema.AudioFade( + name="fade", + fade_in=True, + start_time=2.5, + duration=1.5, + metadata={"baz": "qux"} + ) + encoded = otio.adapters.otio_json.write_to_string(fade) + decoded = otio.adapters.otio_json.read_from_string(encoded) + self.assertIsOTIOEquivalentTo(fade, decoded) + + def 
test_setters(self): + fade = otio.schema.AudioFade( + name="fade", + fade_in=False, + start_time=4.0, + duration=2.0, + metadata={"baz": "qux"} + ) + self.assertEqual(fade.fade_in, False) + self.assertEqual(fade.start_time, 4.0) + self.assertEqual(fade.duration, 2.0) + fade.fade_in = True + fade.start_time = 7.5 + fade.duration = 3.5 + self.assertEqual(fade.fade_in, True) + self.assertEqual(fade.start_time, 7.5) + self.assertEqual(fade.duration, 3.5)