28 #include "../include/Clip.h"
30 using namespace openshot;
33 void Clip::init_settings()
60 init_reader_rotation();
67 wave_color =
Color((
unsigned char)0, (
unsigned char)123, (
unsigned char)255, (
unsigned char)255);
97 manage_reader =
false;
// Read an optional "rotate" entry from the reader's metadata map and parse it
// as a float. (NOTE(review): this extraction is missing interior lines — the
// opening `try` that pairs with the `catch` below is not visible here.)
101 void Clip::init_reader_rotation() {
111 if (reader && reader->
info.
metadata.count(
"rotate") > 0) {
// Parse the metadata string; strtof's second argument is the end-pointer out
// parameter — passing 0 (prefer nullptr) means "don't report where parsing stopped".
115 float rotate_metadata = strtof(reader->
info.
metadata[
"rotate"].c_str(), 0);
117 }
// NOTE(review): catches by value (slicing risk) and silently swallows parse
// failures — prefer `catch (const exception& e)`; confirm against full source.
catch (exception
e) {}
125 Clip::Clip() : reader(NULL), resampler(NULL), audio_cache(NULL)
146 Clip::Clip(
string path) : reader(NULL), resampler(NULL), audio_cache(NULL)
152 string ext = get_file_extension(path);
153 transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
156 if (ext==
"avi" || ext==
"mov" || ext==
"mkv" || ext==
"mpg" || ext==
"mpeg" || ext==
"mp3" || ext==
"mp4" || ext==
"mts" ||
157 ext==
"ogg" || ext==
"wav" || ext==
"wmv" || ext==
"webm" || ext==
"vob")
188 manage_reader =
true;
189 init_reader_rotation();
197 if (manage_reader && reader) {
216 init_reader_rotation();
226 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
243 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
250 ZmqLogger::Instance()->
AppendDebugMethod(
"Clip::Close",
"", -1,
"", -1,
"", -1,
"", -1,
"", -1,
"", -1);
257 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
273 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
288 requested_frame = adjust_frame_number_minimum(requested_frame);
294 else if (enabled_audio == -1 && reader && !reader->
info.
has_audio)
299 else if (enabled_video == -1 && reader && !reader->
info.
has_audio)
303 int64_t new_frame_number = requested_frame;
304 int64_t time_mapped_number = adjust_frame_number_minimum(
time.
GetLong(requested_frame));
306 new_frame_number = time_mapped_number;
309 std::shared_ptr<Frame> original_frame;
310 #pragma omp critical (Clip_GetFrame)
311 original_frame = GetOrCreateFrame(new_frame_number);
314 std::shared_ptr<Frame> frame(
new Frame(new_frame_number, 1, 1,
"#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
315 #pragma omp critical (Clip_GetFrame)
317 frame->SampleRate(original_frame->SampleRate());
318 frame->ChannelsLayout(original_frame->ChannelsLayout());
323 frame->AddImage(std::shared_ptr<QImage>(
new QImage(*original_frame->GetImage())));
327 for (
int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
328 frame->AddAudio(
true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
331 std::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);
334 apply_effects(new_frame);
341 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
// Return the substring after the last '.' in the path (the file extension,
// without the dot).
// NOTE(review): if the path contains no '.', find_last_of returns string::npos
// and npos + 1 wraps to 0, so the entire path is returned — confirm callers
// tolerate this (the extension-matching code above would simply not match).
345 string Clip::get_file_extension(
string path)
348 return path.substr(path.find_last_of(
".") + 1);
// Reverse the sample order of every channel in `buffer` in place, using a
// heap-allocated temporary buffer of the same size.
352 void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
354 int number_of_samples = buffer->getNumSamples();
355 int channels = buffer->getNumChannels();
// Temporary buffer to hold the reversed copy.
// NOTE(review): raw `new` — the matching `delete` is not visible in this
// extraction (interior lines missing); confirm it exists, otherwise this leaks.
358 AudioSampleBuffer *reversed =
new juce::AudioSampleBuffer(channels, number_of_samples);
// Copy each channel into `reversed`, walking the source backwards.
361 for (
int channel = 0; channel < channels; channel++)
364 for (
int s = number_of_samples - 1; s >= 0; s--, n++)
365 reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
// Write the reversed samples back. addFrom ACCUMULATES (gain 1.0f) rather than
// overwrites — presumably `buffer` is cleared in the lines missing between the
// two loops; TODO confirm against the full source.
371 for (
int channel = 0; channel < channels; channel++)
373 buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
380 std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
385 throw ReaderClosed(
"No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.",
"");
391 std::shared_ptr<Frame> new_frame;
394 juce::AudioSampleBuffer *samples = NULL;
399 int new_frame_number = adjust_frame_number_minimum(round(
time.
GetValue(frame_number)));
403 new_frame = std::make_shared<Frame>(new_frame_number, 1, 1,
"#000000", samples_in_frame, frame->GetAudioChannelsCount());
406 new_frame->AddImage(std::shared_ptr<QImage>(
new QImage(*GetOrCreateFrame(new_frame_number)->GetImage())));
414 int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();
422 AudioSampleBuffer *resampled_buffer = NULL;
423 int resampled_buffer_size = 0;
426 samples =
new juce::AudioSampleBuffer(channels, number_of_samples);
430 for (
int channel = 0; channel < channels; channel++)
432 samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
433 number_of_samples, 1.0f);
437 reverse_buffer(samples);
446 resampled_buffer_size = resampled_buffer->getNumSamples();
452 for (
int channel = 0; channel < channels; channel++)
454 new_frame->AddAudio(
true, channel, 0, resampled_buffer->getReadPointer(channel, start),
455 number_of_samples, 1.0f);
458 resampled_buffer = NULL;
461 else if (abs(delta) > 1 && abs(delta) < 100) {
465 int total_delta_samples = 0;
466 for (
int delta_frame = new_frame_number - (delta - 1);
467 delta_frame <= new_frame_number; delta_frame++)
473 samples =
new juce::AudioSampleBuffer(channels, total_delta_samples);
477 for (
int delta_frame = new_frame_number - (delta - 1);
478 delta_frame <= new_frame_number; delta_frame++) {
480 int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
481 AudioSampleBuffer *delta_samples =
new juce::AudioSampleBuffer(channels,
482 number_of_delta_samples);
483 delta_samples->clear();
485 for (
int channel = 0; channel < channels; channel++)
486 delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
487 number_of_delta_samples, 1.0f);
491 reverse_buffer(delta_samples);
494 for (
int channel = 0; channel < channels; channel++)
496 samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
497 number_of_delta_samples, 1.0f);
500 delete delta_samples;
501 delta_samples = NULL;
504 start += number_of_delta_samples;
509 int total_delta_samples = 0;
510 for (
int delta_frame = new_frame_number - (delta + 1);
511 delta_frame >= new_frame_number; delta_frame--)
517 samples =
new juce::AudioSampleBuffer(channels, total_delta_samples);
521 for (
int delta_frame = new_frame_number - (delta + 1);
522 delta_frame >= new_frame_number; delta_frame--) {
524 int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
525 AudioSampleBuffer *delta_samples =
new juce::AudioSampleBuffer(channels,
526 number_of_delta_samples);
527 delta_samples->clear();
529 for (
int channel = 0; channel < channels; channel++)
530 delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
531 number_of_delta_samples, 1.0f);
535 reverse_buffer(delta_samples);
538 for (
int channel = 0; channel < channels; channel++)
540 samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
541 number_of_delta_samples, 1.0f);
544 delete delta_samples;
545 delta_samples = NULL;
548 start += number_of_delta_samples;
553 resampler->
SetBuffer(samples,
float(start) /
float(number_of_samples));
557 int resampled_buffer_size = buffer->getNumSamples();
560 for (
int channel = 0; channel < channels; channel++)
562 new_frame->AddAudio(
true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);
569 samples =
new juce::AudioSampleBuffer(channels, number_of_samples);
573 for (
int channel = 0; channel < channels; channel++)
575 samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);
579 reverse_buffer(samples);
582 for (
int channel = 0; channel < channels; channel++)
583 new_frame->AddAudio(
true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
// Clamp a frame number so it is never below 1 (frames are 1-indexed).
// NOTE(review): the return statements are missing from this extraction.
601 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
604 if (frame_number < 1)
612 std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
614 std::shared_ptr<Frame> new_frame;
621 ZmqLogger::Instance()->
AppendDebugMethod(
"Clip::GetOrCreateFrame (from reader)",
"number", number,
"samples_in_frame", samples_in_frame,
"", -1,
"", -1,
"", -1,
"", -1);
653 new_frame = reader->
GetFrame(number);
668 ZmqLogger::Instance()->
AppendDebugMethod(
"Clip::GetOrCreateFrame (create blank)",
"number", number,
"samples_in_frame", samples_in_frame,
"", -1,
"", -1,
"", -1,
"", -1);
674 new_frame->AddAudioSilence(samples_in_frame);
690 root[
"id"] =
add_property_json(
"ID", 0.0,
"string",
Id(), NULL, -1, -1,
true, requested_frame);
691 root[
"position"] =
add_property_json(
"Position",
Position(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
693 root[
"start"] =
add_property_json(
"Start",
Start(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
694 root[
"end"] =
add_property_json(
"End",
End(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
false, requested_frame);
695 root[
"duration"] =
add_property_json(
"Duration",
Duration(),
"float",
"", NULL, 0, 30 * 60 * 60 * 48,
true, requested_frame);
700 root[
"waveform"] =
add_property_json(
"Waveform", waveform,
"int",
"", NULL, 0, 1,
false, requested_frame);
757 return root.toStyledString();
766 root[
"scale"] =
scale;
770 root[
"waveform"] = waveform;
800 root[
"effects"] = Json::Value(Json::arrayValue);
803 list<EffectBase*>::iterator effect_itr;
804 for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
808 root[
"effects"].append(existing_effect->
JsonValue());
824 bool success = reader.parse( value, root );
827 throw InvalidJSON(
"JSON could not be parsed (or is invalid)",
"");
837 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)",
"");
848 if (!root[
"gravity"].isNull())
850 if (!root[
"scale"].isNull())
852 if (!root[
"anchor"].isNull())
854 if (!root[
"display"].isNull())
856 if (!root[
"mixing"].isNull())
858 if (!root[
"waveform"].isNull())
859 waveform = root[
"waveform"].asBool();
860 if (!root[
"scale_x"].isNull())
862 if (!root[
"scale_y"].isNull())
864 if (!root[
"location_x"].isNull())
866 if (!root[
"location_y"].isNull())
868 if (!root[
"alpha"].isNull())
870 if (!root[
"rotation"].isNull())
872 if (!root[
"time"].isNull())
874 if (!root[
"volume"].isNull())
876 if (!root[
"wave_color"].isNull())
878 if (!root[
"crop_width"].isNull())
880 if (!root[
"crop_height"].isNull())
882 if (!root[
"crop_x"].isNull())
884 if (!root[
"crop_y"].isNull())
886 if (!root[
"shear_x"].isNull())
888 if (!root[
"shear_y"].isNull())
890 if (!root[
"channel_filter"].isNull())
892 if (!root[
"channel_mapping"].isNull())
894 if (!root[
"has_audio"].isNull())
896 if (!root[
"has_video"].isNull())
898 if (!root[
"perspective_c1_x"].isNull())
900 if (!root[
"perspective_c1_y"].isNull())
902 if (!root[
"perspective_c2_x"].isNull())
904 if (!root[
"perspective_c2_y"].isNull())
906 if (!root[
"perspective_c3_x"].isNull())
908 if (!root[
"perspective_c3_y"].isNull())
910 if (!root[
"perspective_c4_x"].isNull())
912 if (!root[
"perspective_c4_y"].isNull())
914 if (!root[
"effects"].isNull()) {
920 for (
int x = 0; x < root[
"effects"].size(); x++) {
922 Json::Value existing_effect = root[
"effects"][x];
927 if (!existing_effect[
"type"].isNull()) {
940 if (!root[
"reader"].isNull())
942 if (!root[
"reader"][
"type"].isNull())
945 bool already_open =
false;
949 already_open = reader->
IsOpen();
958 string type = root[
"reader"][
"type"].asString();
960 if (type ==
"FFmpegReader") {
963 reader =
new FFmpegReader(root[
"reader"][
"path"].asString(),
false);
966 }
else if (type ==
"QtImageReader") {
969 reader =
new QtImageReader(root[
"reader"][
"path"].asString(),
false);
972 #ifdef USE_IMAGEMAGICK
973 }
else if (type ==
"ImageReader") {
976 reader =
new ImageReader(root[
"reader"][
"path"].asString(),
false);
979 }
else if (type ==
"TextReader") {
986 }
else if (type ==
"ChunkReader") {
989 reader =
new ChunkReader(root[
"reader"][
"path"].asString(), (
ChunkVersion) root[
"reader"][
"chunk_version"].asInt());
992 }
else if (type ==
"DummyReader") {
1001 manage_reader =
true;
1012 void Clip::sort_effects()
1022 effects.push_back(effect);
1031 effects.remove(effect);
1035 std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
1038 list<EffectBase*>::iterator effect_itr;
1039 for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1045 frame = effect->
GetFrame(frame, frame->number);
vector< Coordinate > Values
Vector of all Values (i.e. the processed coordinates from the curve)
int max_height
The maximum image height needed by this clip (used for optimizations)
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Display the timeline's frame number.
Point GetMaxPoint()
Get max point (by Y coordinate)
void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
void Close()
Close the internal reader.
Json::Value JsonValue()
Generate Json::JsonValue for this object.
int num
Numerator for the fraction.
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Keyframe perspective_c4_x
Curves representing X for coordinate 4.
This abstract class is the base class, used by all effects in libopenshot.
EffectBase * CreateEffect(string effect_type)
Align clip to the right of its parent (middle aligned)
Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Keyframe green
Curve representing the green value (0 - 255)
Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
string previous_properties
This string contains the previous JSON properties.
float End()
Override End() method.
Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Align clip to the bottom right of its parent.
Json::Value JsonValue()
Generate Json::JsonValue for this object.
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
GravityType gravity
The gravity of a clip determines where it snaps to its parent.
int width
The width of the video (in pixels)
Keyframe volume
Curve representing the volume (0 to 1)
int max_width
The maximum image width needed by this clip (used for optimizations)
This class represents a single frame of video (i.e. image & audio data)
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, int64_t frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
This class is used as a simple, dummy reader, which always returns a blank frame. ...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Keyframe red
Curve representing the red value (0 - 255)
float duration
Length of time (in seconds)
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%...
Keyframe time
Curve representing the frames over time to play (used for speed and direction of video) ...
Json::Value add_property_json(string name, float value, string type, string memo, Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame)
Generate JSON for a property.
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
void AddEffect(EffectBase *effect)
Add an effect to the clip.
virtual void Close()=0
Close the reader (and any resources it was consuming)
This abstract class is the base class, used by all readers in libopenshot.
int Layer()
Get layer of clip on timeline (lower number is covered by higher numbers)
Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Exception when a reader is closed, and a frame is requested.
bool has_video
Determines if this file has a video stream.
Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) ...
Do not display the frame number.
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Color wave_color
Curve representing the color of the audio wave form.
Align clip to the top right of its parent.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Align clip to the bottom left of its parent.
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Do not apply any volume mixing adjustments. Just add the samples together.
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
virtual std::shared_ptr< Frame > GetFrame(int64_t number)=0
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
bool has_audio
Determines if this file has an audio stream.
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
double Y
The Y value of the coordinate (usually representing the value of the property being animated) ...
Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Keyframe blue
Curve representing the blue value (0 - 255)
Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
ScaleType scale
The scale determines how a clip should be resized to fit its parent.
int height
The height of the video (in pixels)
Align clip to the bottom center of its parent.
Align clip to the top left of its parent.
Json::Value add_property_choice_json(string name, int value, int selected_value)
Generate JSON choice for a property (dropdown properties)
string Id()
Get basic properties.
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any). Useful for debugging.
Keyframe channel_filter
Audio channel filter and mappings.
float Position()
Get position on timeline (in seconds)
bool IsIncreasing(int index)
Get the direction of the curve at a specific index (increasing or decreasing)
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
FrameDisplayType display
The format to display the frame number (if any)
void SetJson(string value)
Load JSON string into this object.
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Align clip to the left of its parent (middle aligned)
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low...
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Keyframe rotation
Curve representing the rotation (0 to 360)
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Fraction GetRepeatFraction(int64_t index)
Get the fraction that represents how many times this value is repeated in the curve.
Scale the clip until both height and width fill the canvas (distort to fit)
Display the clip's internal frame number.
vector< Point > Points
Vector of all Points.
double GetDelta(int64_t index)
Get the change in Y value (from the previous Y value)
ReaderInfo info
Information about the current media file.
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
string PropertiesJSON(int64_t requested_frame)
Clip()
Default Constructor.
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
AnchorType
This enumeration determines what parent a clip should be aligned to.
float end
The position in seconds to end playing (used to trim the ending of a clip)
Exception for frames that are out of bounds.
std::map< string, string > metadata
An optional map/dictionary of metadata for this reader.
void Open()
Open the internal reader.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
This class represents a color (used on the timeline and clips)
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%) ...
Align clip to the center of its parent (middle aligned)
GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
double GetValue(int64_t index)
Get the value at a specific index.
Display both the clip's and timeline's frame number.
void RemoveEffect(EffectBase *effect)
Remove an effect from the clip.
Coordinate co
This is the primary coordinate.
AnchorType anchor
The anchor determines what parent a clip should snap to.
Exception for invalid JSON.
int64_t GetLong(int64_t index)
Get the rounded LONG value at a specific index.
Keyframe alpha
Curve representing the alpha (1 to 0)
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
This class returns a listing of all effects supported by libopenshot.
Align clip to the top center of its parent.
int den
Denominator for the fraction.
int channels
The number of audio channels used in the audio stream.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Scale the clip until either height or width fills the canvas (with no cropping)
Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Json::Value JsonValue()
Generate Json::JsonValue for this object.
float Duration()
Get the length of this clip (in seconds)
GravityType
This enumeration determines how clips are aligned to their parent container.
Anchor the clip to the canvas.
string Json()
Get and Set JSON methods.
float Start()
Get start position (in seconds) of clip (trim start of video)
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Exception when too many seek attempts happen.
ReaderBase * Reader()
Get the current reader.
virtual bool IsOpen()=0
Determine if reader is open or closed.
This class is used to resample audio data for many sequential frames.
Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)