/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
		is_open(false), auto_map_clips(true)
{
	// Create CrashHandler and attach it (in case of errors)
	CrashHandler::Instance();

	// Init viewport size (curve based, because it can be animated)
	viewport_scale = Keyframe(100.0);
	viewport_x = Keyframe(0.0);
	viewport_y = Keyframe(0.0);

	// Init background color
	color.red = Keyframe(0.0);
	color.green = Keyframe(0.0);
	color.blue = Keyframe(0.0);

	// Init FileInfo struct (clear all values)
	info.width = width;
	info.height = height;
	info.fps = fps;
	info.sample_rate = sample_rate;
	info.channels = channels;
	info.channel_layout = channel_layout;
	info.video_timebase = fps.Reciprocal();
	info.duration = 60 * 30; // 30 minute default duration
	info.has_audio = true;
	info.has_video = true;
	info.video_length = info.fps.ToFloat() * info.duration;

	// Init max image size
	SetMaxSize(info.width, info.height);

	// Init cache
	final_cache = new CacheMemory();
	final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
	// All clips should be converted to the frame rate of this timeline
	if (auto_map_clips)
		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);

	// Add clip to list
	clips.push_back(clip);

	// Sort clips
	sort_clips();
}

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
	effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
	clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Determine type of reader
	ReaderBase* clip_reader = NULL;
	if (clip->Reader()->Name() == "FrameMapper")
	{
		// Get the existing reader
		clip_reader = (ReaderBase*) clip->Reader();

	} else {

		// Create a new FrameMapper to wrap the current reader
		clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
	}

	// Update the mapping
	FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
	clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

	// Update clip reader
	clip->Reader(clip_reader);
}

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
	// Clear all cached frames
	ClearAllCache();

	// Loop through all clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);
	}
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
	// Get float version of fps fraction
	double raw_fps = rate.ToFloat();

	// Return the time (in seconds) of this frame
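	// (e.g. with a 30/1 fps fraction, frame 1 returns 0.0 and frame 31 returns 1.0)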
	return double(number - 1) / raw_fps;
}

// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);

	// Find Effects at this position and layer
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *effect = (*effect_itr);

		// Does the effect intersect the current requested time
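		// (positions and durations are in seconds; multiplying by fps and adding 1 converts
		// them to 1-based timeline frame numbers)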
		long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
		long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

		bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);

		// Effect is visible
		if (does_effect_intersect)
		{
			// Determine the frame needed for this effect (based on the position on the timeline)
			long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
			long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);

			// Apply the effect to this frame
			frame = effect->GetFrame(frame, effect_frame_number);
		}

	} // end effect loop

	// Return modified frame
	return frame;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
	std::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		#pragma omp critical (T_GetOrCreateFrame)
		new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));

		// Return real frame
		return new_frame;

	} catch (const ReaderClosed & e) {
		// ...
	} catch (const TooManySeeks & e) {
		// ...
	} catch (const OutOfBoundsFrame & e) {
		// ...
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame
	new_frame = std::make_shared<Frame>(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels);
	#pragma omp critical (T_GetOrCreateFrame)
	{
		new_frame->SampleRate(info.sample_rate);
		new_frame->ChannelsLayout(info.channel_layout);
	}
	return new_frame;
}

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
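	// (is_top_clip: true when this clip is the most recently started clip overlapping on its
	// layer; only the top clip has timeline effects applied. max_volume: the combined volume
	// of all overlapping clips at this frame, used by the volume mixing strategies below.)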
	// Get the clip's frame & image
	std::shared_ptr<Frame> source_frame;
	#pragma omp critical (T_addLayer)
	source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		std::shared_ptr<QImage> source_image;
		#pragma omp critical (T_addLayer)
		source_image = source_frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha);
		source_frame->AddImage(std::shared_ptr<QImage>(source_image));
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip && source_frame)
		#pragma omp critical (T_addLayer)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	std::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				// Get volume from previous frame and this frame
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
				float volume = source_clip->volume.GetValue(clip_frame_number);
				int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
				int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

				// Apply volume mixing strategy
				if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
					// Don't allow this clip to exceed 100% (divide the volume equally between all overlapping clips with volume)
					previous_volume = previous_volume / max_volume;
					volume = volume / max_volume;
				}
				else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
					// Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
					previous_volume = previous_volume * 0.77;
					volume = volume * 0.77;
				}

				// If channel filter enabled, check for correct channel (and skip non-matching channels)
				if (channel_filter != -1 && channel_filter != channel)
					continue; // skip to next channel

				// If no volume on this frame or previous frame, do nothing
				if (previous_volume == 0.0 && volume == 0.0)
					continue; // skip to next channel

				// If channel mapping disabled, just use the current channel
				if (channel_mapping == -1)
					channel_mapping = channel;

				// Apply ramp to source frame (if needed)
				if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
					source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force timeline frame to match the source frame
					#pragma omp critical (T_addLayer)
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added
				// together, so be sure to set the gains correctly, or the sum will exceed 1.0 and audio distortion will occur.
				#pragma omp critical (T_addLayer)
				new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);

			}
		else
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

	}

	// Skip out if only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();

		// Loop through pixels
		for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
		{
			// Apply alpha to the pixel's alpha byte (4 bytes per pixel, alpha is the 4th byte)
			pixels[byte_index + 3] *= alpha;
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
	}

	/* RESIZE SOURCE IMAGE - based on scale type */
	QSize source_size = source_image->size();
	switch (source_clip->scale)
	{
		case (SCALE_FIT): {
			// keep aspect ratio
			source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;
		}
		case (SCALE_STRETCH): {
			// ignore aspect ratio
			source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;
		}
		case (SCALE_CROP): {
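			// (build two candidate sizes, one matching the target width and one matching the
			// target height, then pick whichever covers the whole canvas, so the image fills
			// the frame and the overflow is cropped)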
			QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height()))));
			QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT);

			// respect aspect ratio
			if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT)
				source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
			else
				source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;
		}
		case (SCALE_NONE): {
			// Calculate ratio of source size to project size
			// Even with no scaling, previews need to be adjusted correctly
			// (otherwise NONE scaling draws the frame image outside of the preview)
			float source_width_ratio = source_size.width() / float(info.width);
			float source_height_ratio = source_size.height() / float(info.height);
			source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;
		}
	}

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
	float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
	float scaled_source_width = source_size.width() * sx;
	float scaled_source_height = source_size.height() * sy;

	switch (source_clip->gravity)
	{
		case (GRAVITY_TOP):
			x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
			break;
		case (GRAVITY_TOP_RIGHT):
			x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
			break;
		case (GRAVITY_LEFT):
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_CENTER):
			x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_RIGHT):
			x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_BOTTOM_LEFT):
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM):
			x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM_RIGHT):
			x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
			y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
			break;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);

	/* LOCATION, ROTATION, AND SCALE */
	float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
	x += (Settings::Instance()->MAX_WIDTH * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
	y += (Settings::Instance()->MAX_HEIGHT * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
	float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
	float shear_y = source_clip->shear_y.GetValue(clip_frame_number);

	bool transformed = false;
	QTransform transform;

	// Transform source image (if needed)
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

	if (!isEqual(r, 0)) {
		// ROTATE CLIP
		float origin_x = x + (scaled_source_width / 2.0);
		float origin_y = y + (scaled_source_height / 2.0);
		transform.translate(origin_x, origin_y);
		transform.rotate(r);
		transform.translate(-origin_x,-origin_y);
		transformed = true;
	}

	if (!isEqual(x, 0) || !isEqual(y, 0)) {
		// TRANSLATE/MOVE CLIP
		transform.translate(x, y);
		transformed = true;
	}

	// SCALE CLIP (if needed)
	float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
	float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;

	if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
		transform.scale(source_width_scale, source_height_scale);
		transformed = true;
	}

	if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
		// SHEAR HEIGHT/WIDTH
		transform.shear(shear_x, shear_y);
		transformed = true;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);

	/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
	std::shared_ptr<QImage> new_image;
	#pragma omp critical (T_addLayer)
	new_image = new_frame->GetImage();

	// Load timeline's new frame image into a QPainter
	QPainter painter(new_image.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale)... if any
	if (transformed)
		painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);

	// Draw frame #'s on top of image (if needed)
	if (source_clip->display != FRAME_DISPLAY_NONE) {
		stringstream frame_number_str;
		switch (source_clip->display)
		{
			case (FRAME_DISPLAY_CLIP):
				frame_number_str << clip_frame_number;
				break;

			case (FRAME_DISPLAY_TIMELINE):
				frame_number_str << timeline_frame_number;
				break;

			case (FRAME_DISPLAY_BOTH):
				frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
				break;
		}

		// Draw frame number on top of image
		painter.setPen(QColor("#ffffff"));
		painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
	}

	painter.end();

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
}

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

	// is clip already in list?
	bool clip_found = open_clips.count(clip);

	if (clip_found && !does_clip_intersect)
	{
		// Remove clip from 'opened' list, because it's closed now
		open_clips.erase(clip);

		// Close clip
		clip->Close();
	}
	else if (!clip_found && does_clip_intersect)
	{
		// Add clip to 'opened' list, because it's missing
		open_clips[clip] = clip;

		try {
			// Open the clip
			clip->Open();

		} catch (const InvalidFile & e) {
			// ...
		}
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

	// sort clips
	clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
	// sort effects
	effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

	// Close all open clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Open or Close this clip, based on if it's intersecting or not
		update_open_clips(clip, false);
	}

	// Mark timeline as closed
	is_open = false;

	// Clear cache
	final_cache->Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
	is_open = true;
}

// Compare 2 floating point numbers for equality
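// (an absolute-tolerance comparison, which is sufficient for the volume, scale,
// and offset values compared in this file)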
bool Timeline::isEqual(double a, double b)
{
	return fabs(a - b) < 0.000001;
}

// Get an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
	// Adjust out of bounds frame number
	if (requested_frame < 1)
		requested_frame = 1;

	// Check cache
	std::shared_ptr<Frame> frame;
	#pragma omp critical (T_GetFrame)
	frame = final_cache->GetFrame(requested_frame);
	if (frame) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Return cached frame
		return frame;
	}
	else
	{
		// Create a scoped lock, allowing only a single thread to run the following code at one time
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

		// Check for open reader (or throw exception)
		if (!is_open)
			throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

		// Check cache again (due to locking)
		#pragma omp critical (T_GetFrame)
		frame = final_cache->GetFrame(requested_frame);
		if (frame) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

			// Return cached frame
			return frame;
		}

		// Minimum number of frames to process (for performance reasons)
		int minimum_frames = OPEN_MP_NUM_PROCESSORS;
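		// (render one frame per OpenMP thread in a single batch, so sequential playback
		// usually finds the next few frames already waiting in the cache)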

		// Get a list of clips that intersect with the requested section of timeline
		// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
		vector<Clip*> nearby_clips;
		#pragma omp critical (T_GetFrame)
		nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

		omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
		// Allow nested OpenMP sections
		omp_set_nested(true);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);

		// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
		// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
		for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
		{
			// Loop through clips
			for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
			{
				// Get clip object from the vector
				Clip *clip = nearby_clips[clip_index];
				long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
				long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

				bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
				if (does_clip_intersect)
				{
					// Get clip frame #
					long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
					long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
					// Cache clip object
					clip->GetFrame(clip_frame_number);
				}
			}
		}

		#pragma omp parallel
		{
			// Loop through all requested frames
			#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
			for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
			{
				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

				// Init some basic properties about this frame
				int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

				// Create blank frame (which will become the requested frame)
				std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels));
				#pragma omp critical (T_GetFrame)
				{
					new_frame->AddAudioSilence(samples_in_frame);
					new_frame->SampleRate(info.sample_rate);
					new_frame->ChannelsLayout(info.channel_layout);
				}

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Add Background Color to 1st layer (if animated or not black)
				if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
					(color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
					new_frame->AddColor(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, color.GetColorHex(frame_number));

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);

				// Find Clips near this time
				for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
				{
					// Get clip object from the vector
					Clip *clip = nearby_clips[clip_index];
					long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
					long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

					bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

					// Debug output
					ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);

					// Clip is visible
					if (does_clip_intersect)
					{
						// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
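						// (the "top" clip is the one with the latest start position among clips
						// overlapping on the same layer; it alone gets timeline effects applied)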
						bool is_top_clip = true;
						float max_volume = 0.0;
						for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
						{
							Clip *nearby_clip = nearby_clips[top_clip_index];
							long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
							long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
							long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
							long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

							// Determine if top clip
							if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
								nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
								nearby_clip_start_position > clip_start_position && is_top_clip == true) {
								is_top_clip = false;
							}

							// Determine max volume of overlapping clips
							if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
								nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
								nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
								max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
							}
						}

						// Determine the frame needed for this clip (based on the position on the timeline)
						long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
						long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);

						// Add clip's frame as layer
						add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);

					} else
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);

				} // end clip loop

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Set frame # on mapped frame
				#pragma omp ordered
				{
					new_frame->SetFrameNumber(frame_number);

					// Add final frame to cache
					final_cache->Add(new_frame);
				}

			} // end frame loop
		} // end parallel

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

		// Return frame (or blank frame)
		return final_cache->GetFrame(requested_frame);
	}
}


// Find intersecting clips (or non intersecting clips)
vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
	// Find matching clips
	vector<Clip*> matching_clips;

	// Calculate the frame range to search
	float min_requested_frame = requested_frame;
	float max_requested_frame = requested_frame + (number_of_frames - 1);

	// Re-Sort Clips (since they likely changed)
	sort_clips();

	// Find Clips at this time
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Does clip intersect the current requested time
		long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
		long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

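		// (a standard interval-overlap test: since min <= max, this reduces to
		// clip_start_position <= max_requested_frame && clip_end_position >= min_requested_frame)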
		bool does_clip_intersect =
			(clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
			(clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);

		// Open (or schedule for closing) this clip, based on if it's intersecting or not
		#pragma omp critical (reader_lock)
		update_open_clips(clip, does_clip_intersect);

		// Clip is visible
		if (does_clip_intersect && include)
			// Add the intersecting clip
			matching_clips.push_back(clip);

		else if (!does_clip_intersect && !include)
			// Add the non-intersecting clip
			matching_clips.push_back(clip);

	} // end clip loop

	// return list
	return matching_clips;
}

// Set the cache object used by this reader
void Timeline::SetCache(CacheBase* new_cache) {
	// Set new cache
	final_cache = new_cache;
}

// Generate JSON string of this object
string Timeline::Json() {

	// Return formatted string
	return JsonValue().toStyledString();
}

// Generate Json::JsonValue for this object
Json::Value Timeline::JsonValue() {

	// Create root json object
	Json::Value root = ReaderBase::JsonValue(); // get parent properties
	root["type"] = "Timeline";
	root["viewport_scale"] = viewport_scale.JsonValue();
	root["viewport_x"] = viewport_x.JsonValue();
	root["viewport_y"] = viewport_y.JsonValue();
	root["color"] = color.JsonValue();

	// Add array of clips
	root["clips"] = Json::Value(Json::arrayValue);

	// Loop through clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *existing_clip = (*clip_itr);
		root["clips"].append(existing_clip->JsonValue());
	}

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// Loop through effects
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}

	// return JsonValue
	return root;
}

// Load JSON string into this object
void Timeline::SetJson(string value) {

	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success)
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

	try
	{
		// Set all values that match
		SetJsonValue(root);
	}
	catch (exception e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}

// Load Json::JsonValue into this object
void Timeline::SetJsonValue(Json::Value root) {

	// Close timeline before we do anything (this also removes all open and closing clips)
	bool was_open = is_open;
	Close();

	// Set parent data
	ReaderBase::SetJsonValue(root);

	if (!root["clips"].isNull()) {
		// Clear existing clips
		clips.clear();

		// loop through clips
		for (int x = 0; x < root["clips"].size(); x++) {
			// Get each clip
			Json::Value existing_clip = root["clips"][x];

			// Create Clip
			Clip *c = new Clip();

			// Load Json into Clip
			c->SetJsonValue(existing_clip);

			// Add Clip to Timeline
			AddClip(c);
		}
	}

	if (!root["effects"].isNull()) {
		// Clear existing effects
		effects.clear();

		// loop through effects
		for (int x = 0; x < root["effects"].size(); x++) {
			// Get each effect
			Json::Value existing_effect = root["effects"][x];

			// Create Effect
			EffectBase *e = NULL;

			if (!existing_effect["type"].isNull()) {
				// Create instance of effect
				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {

					// Load Json into Effect
					e->SetJsonValue(existing_effect);

					// Add Effect to Timeline
					AddEffect(e);
				}
			}
		}
	}

	if (!root["duration"].isNull()) {
		// Update duration (and video length) of timeline
		info.duration = root["duration"].asDouble();
		info.video_length = info.fps.ToFloat() * info.duration;
	}

	// Re-open if needed
	if (was_open)
		Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
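// Each change in the JSON array takes the form:
//   { "type": "insert" | "update" | "delete", "key": ["clips", {"id": "..."}], "value": { ... } }
// where "key" is a path selecting the target object and "value" carries the changed properties.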
void Timeline::ApplyJsonDiff(string value) {

	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success || !root.isArray())
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

	try
	{
		// Process the JSON change array, loop through each item
		for (int x = 0; x < root.size(); x++) {
			// Get each change
			Json::Value change = root[x];
			string root_key = change["key"][(uint)0].asString();

			// Process each type of change
			if (root_key == "clips")
				// Apply to CLIPS
				apply_json_to_clips(change);

			else if (root_key == "effects")
				// Apply to EFFECTS
				apply_json_to_effects(change);

			else
				// Apply to TIMELINE
				apply_json_to_timeline(change);

		}
	}
	catch (exception e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string clip_id = "";
	Clip *existing_clip = NULL;

	// Find id of clip (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull()) {
				// Set the id
				clip_id = key_part["id"].asString();

				// Find matching clip in timeline (if any)
				list<Clip*>::iterator clip_itr;
				for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
				{
					// Get clip object from the iterator
					Clip *c = (*clip_itr);
					if (c->Id() == clip_id) {
						existing_clip = c;
						break; // clip found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Check for a more specific key (targeting this clip's effects)
	// For example: ["clips", {"id":123}, "effects", {"id":432}]
	if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
	{
		// This change is actually targeting a specific effect under a clip (and not the clip)
		Json::Value key_part = change["key"][3];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*> effect_list = existing_clip->Effects();
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						// Apply the change to the effect directly
						apply_json_to_effects(change, e);

						// Calculate start and end frames that this impacts, and remove those frames from the cache
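						// (the 8-frame padding on each side presumably evicts neighboring cached
						// frames whose audio or transitions may have blended with this clip)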
						int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
						int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
						final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

						return; // effect found, don't update clip
					}
				}
			}
		}
	}

	// Calculate start and end frames that this impacts, and remove those frames from the cache
	if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
		int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
		int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
		final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
	}

	// Determine type of change operation
	if (change_type == "insert") {

		// Create new clip
		Clip *clip = new Clip();
		clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
		AddClip(clip); // Add clip to timeline

		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);

	} else if (change_type == "update") {

		// Update existing clip
		if (existing_clip) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove cache on clip's Reader (if found)
			if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
				existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Update clip properties from JSON
			existing_clip->SetJsonValue(change["value"]);

			// Apply framemapper (or update existing framemapper)
			apply_mapper_to_clip(existing_clip);
		}

	} else if (change_type == "delete") {

		// Remove existing clip
		if (existing_clip) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove clip from timeline
			RemoveClip(existing_clip);
		}

	}

}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	EffectBase *existing_effect = NULL;

	// Find id of an effect (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						existing_effect = e;
						break; // effect found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Now that we found the effect, apply the change to it
	if (existing_effect || change_type == "insert")
		// Apply change to effect
		apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {

	// Get key and type of change
	string change_type = change["type"].asString();

	// Calculate start and end frames that this impacts, and remove those frames from the cache
	if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
		int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
		int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
		final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
	}

	// Determine type of change operation
	if (change_type == "insert") {

		// Determine type of effect
		string effect_type = change["value"]["type"].asString();

		// Create Effect
		EffectBase *e = NULL;

		// Init the matching effect object
		if (e = EffectInfo().CreateEffect(effect_type)) {

			// Load Json into Effect
			e->SetJsonValue(change["value"]);

			// Add Effect to Timeline
			AddEffect(e);
		}

	} else if (change_type == "update") {

		// Update existing effect
		if (existing_effect) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Update effect properties from JSON
			existing_effect->SetJsonValue(change["value"]);
		}

	} else if (change_type == "delete") {

		// Remove existing effect
		if (existing_effect) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove effect from timeline
			RemoveEffect(existing_effect);
		}

	}
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string root_key = change["key"][(uint)0].asString();
	string sub_key = "";
	if (change["key"].size() >= 2)
		sub_key = change["key"][(uint)1].asString();

	// Clear entire cache
	final_cache->Clear();

	// Determine type of change operation
	if (change_type == "insert" || change_type == "update") {

		// INSERT / UPDATE
		// Check for valid property
		if (root_key == "color")
			// Set color
			color.SetJsonValue(change["value"]);
		else if (root_key == "viewport_scale")
			// Set viewport scale
			viewport_scale.SetJsonValue(change["value"]);
		else if (root_key == "viewport_x")
			// Set viewport x offset
			viewport_x.SetJsonValue(change["value"]);
		else if (root_key == "viewport_y")
			// Set viewport y offset
			viewport_y.SetJsonValue(change["value"]);
		else if (root_key == "duration") {
			// Update duration (and video length) of timeline
			info.duration = change["value"].asDouble();
			info.video_length = info.fps.ToFloat() * info.duration;
		}
		else if (root_key == "width")
			// Set width
			info.width = change["value"].asInt();
		else if (root_key == "height")
			// Set height
			info.height = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
			// Set fps fraction
			if (!change["value"]["num"].isNull())
				info.fps.num = change["value"]["num"].asInt();
			if (!change["value"]["den"].isNull())
				info.fps.den = change["value"]["den"].asInt();
		}
		else if (root_key == "fps" && sub_key == "num")
			// Set fps.num
			info.fps.num = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "den")
			// Set fps.den
			info.fps.den = change["value"].asInt();
		else if (root_key == "sample_rate")
			// Set sample rate
			info.sample_rate = change["value"].asInt();
		else if (root_key == "channels")
			// Set channels
			info.channels = change["value"].asInt();
		else if (root_key == "channel_layout")
			// Set channel layout
			info.channel_layout = (ChannelLayout) change["value"].asInt();
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	} else if (change["type"].asString() == "delete") {

		// DELETE / RESET
		// Reset the following properties (since we can't delete them)
		if (root_key == "color") {
			color = Color();
			color.red = Keyframe(0.0);
			color.green = Keyframe(0.0);
			color.blue = Keyframe(0.0);
		}
		else if (root_key == "viewport_scale")
			viewport_scale = Keyframe(1.0);
		else if (root_key == "viewport_x")
			viewport_x = Keyframe(0.0);
		else if (root_key == "viewport_y")
			viewport_y = Keyframe(0.0);
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	}

}
1412 
1413 // Clear all caches
1415 
1416  // Get lock (prevent getting frames while this happens)
1417  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1418 
1419  // Clear primary cache
1420  final_cache->Clear();
1421 
1422  // Loop through all clips
1423  list<Clip*>::iterator clip_itr;
1424  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1425  {
1426  // Get clip object from the iterator
1427  Clip *clip = (*clip_itr);
1428 
1429  // Clear cache on clip
1430  clip->Reader()->GetCache()->Clear();
1431 
1432  // Clear nested Reader (if any)
1433  if (clip->Reader()->Name() == "FrameMapper") {
1434  FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
1435  if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
1436  nested_reader->Reader()->GetCache()->Clear();
1437  }
1438 
1439  }
1440 }
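
// --- Example (editor's sketch; not part of the original source) ---
// ClearAllCache() matters when clip properties are changed directly rather
// than through ApplyJsonDiff() (which clears final_cache itself): dropping
// the timeline cache plus every clip, mapper, and nested-reader cache forces
// the next GetFrame() call to re-composite instead of returning stale frames.
// `t` and `some_clip` are assumed instances:
//
//   some_clip->scale_x = Keyframe(0.5);            // direct edit; caches now stale
//   t.ClearAllCache();
//   std::shared_ptr<Frame> fresh = t.GetFrame(1);  // re-rendered, not cached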
1441 
1442 // Set Max Image Size (used for performance optimization). Convenience function for setting
1443 // Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT.
1444 void Timeline::SetMaxSize(int width, int height) {
1445  // Init max image size (choose the smallest one)
1446  Settings::Instance()->MAX_WIDTH = min(width, info.width);
1447  Settings::Instance()->MAX_HEIGHT = min(height, info.height);
1448 }
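
// --- Example (editor's sketch; not part of the original source) ---
// Typical use: shrink decoded frames for a small preview widget. Values larger
// than the timeline's own canvas are clamped by the min() calls above, so this
// can only reduce (never enlarge) the maximum image size.
//
//   t.SetMaxSize(640, 360);  // decode/scale frames to at most 640x360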