/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
	is_open(false), auto_map_clips(true)
{
	// Create CrashHandler and attach it (in case of errors)
	CrashHandler::Instance();

	// Init viewport size (curve based, because it can be animated)
	viewport_scale = Keyframe(100.0);
	viewport_x = Keyframe(0.0);
	viewport_y = Keyframe(0.0);

	// Init background color
	color.red = Keyframe(0.0);
	color.green = Keyframe(0.0);
	color.blue = Keyframe(0.0);

	// Init FileInfo struct (clear all values)
	info.width = width;
	info.height = height;
	info.fps = fps;
	info.sample_rate = sample_rate;
	info.channels = channels;
	info.channel_layout = channel_layout;
	info.duration = 60 * 30; // 30 minute default duration
	info.has_audio = true;
	info.has_video = true;

	// Init max image size
	SetMaxSize(info.width, info.height);

	// Init cache
	final_cache = new CacheMemory();
	final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
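
// Usage sketch (illustrative values and a hypothetical media file; a minimal
// example of the constructor above, not an exhaustive workflow):
//
//   Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
//   Clip c("video.mp4");                       // hypothetical clip
//   t.AddClip(&c);                             // clip is auto-mapped to 30 fps
//   t.Open();
//   std::shared_ptr<Frame> f = t.GetFrame(1);  // render the first composited frame
//   t.Close();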

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
	// All clips should be converted to the frame rate of this timeline
	if (auto_map_clips)
		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);

	// Add clip to list
	clips.push_back(clip);

	// Sort clips
	sort_clips();
}

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
	effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
	clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Determine type of reader
	ReaderBase* clip_reader = NULL;
	if (clip->Reader()->Name() == "FrameMapper")
	{
		// Get the existing reader
		clip_reader = (ReaderBase*) clip->Reader();

	} else {

		// Create a new FrameMapper to wrap the current reader
		clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
	}

	// Update the mapping to match this timeline's settings
	FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
	clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

	// Update clip reader
	clip->Reader(clip_reader);
}
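
// For example (illustrative): a 24 fps clip added to a 30 fps timeline is wrapped
// so that one second of media is exposed as 30 mapped frames (roughly, mapped
// frame 30 corresponds to source frame 24), with audio resampled to info.sample_rate.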

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
	// Clear all cached frames
	ClearAllCache();

	// Loop through all clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);
	}
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
	// Get float version of fps fraction
	double raw_fps = rate.ToFloat();

	// Return the time (in seconds) of this frame
	return double(number - 1) / raw_fps;
}
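
// Worked example: at NTSC 30000/1001 fps, calculate_time(1, rate) == 0.0 and
// calculate_time(31, rate) == 30 / (30000/1001) ≈ 1.001 seconds.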

// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);

	// Find Effects at this position and layer
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *effect = (*effect_itr);

		// Does the effect intersect the current requested time
		long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
		long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

		bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);

		// Effect is visible
		if (does_effect_intersect)
		{
			// Determine the frame needed for this effect (based on the position on the timeline)
			long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
			long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);

			// Apply the effect to this frame
			frame = effect->GetFrame(frame, effect_frame_number);
		}

	} // end effect loop

	// Return modified frame
	return frame;
}
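
// Position math example (30 fps timeline): an effect with Position() == 2.0 and
// Duration() == 1.0 spans timeline frames 61..91 (round(2.0 * 30) + 1 through
// round(3.0 * 30) + 1). With Start() == 0, timeline frame 75 maps to effect
// frame 75 - 61 + 1 = 15.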

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
	std::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Set max image size (used for performance optimization)
		clip->SetMaxSize(info.width, info.height);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		#pragma omp critical (T_GetOrCreateFrame)
		new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));

		// Return real frame
		return new_frame;

	} catch (const ReaderClosed & e) {
		// ...
	} catch (const TooManySeeks & e) {
		// ...
	} catch (const OutOfBoundsFrame & e) {
		// ...
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame
	new_frame = std::make_shared<Frame>(number, max_width, max_height, "#000000", samples_in_frame, info.channels);
	#pragma omp critical (T_GetOrCreateFrame)
	{
		new_frame->SampleRate(info.sample_rate);
		new_frame->ChannelsLayout(info.channel_layout);
	}
	return new_frame;
}

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
	// Get the clip's frame & image
	std::shared_ptr<Frame> source_frame;
	#pragma omp critical (T_addLayer)
	source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		std::shared_ptr<QImage> source_image;
		#pragma omp critical (T_addLayer)
		source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha);
		source_frame->AddImage(std::shared_ptr<QImage>(source_image));
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip && source_frame)
		#pragma omp critical (T_addLayer)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	std::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				// Get volume from previous frame and this frame
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
				float volume = source_clip->volume.GetValue(clip_frame_number);
				int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
				int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

				// Apply volume mixing strategy
				if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
					// Don't allow this clip to exceed 100% (divide volume equally between all overlapping clips with volume)
					previous_volume = previous_volume / max_volume;
					volume = volume / max_volume;
				}
				else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
					// Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
					previous_volume = previous_volume * 0.77;
					volume = volume * 0.77;
				}

				// If channel filter enabled, check for correct channel (and skip non-matching channels)
				if (channel_filter != -1 && channel_filter != channel)
					continue; // skip to next channel

				// If no volume on this frame or previous frame, do nothing
				if (previous_volume == 0.0 && volume == 0.0)
					continue; // skip to next channel

				// If channel mapping disabled, just use the current channel
				if (channel_mapping == -1)
					channel_mapping = channel;

				// Apply ramp to source frame (if needed)
				if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
					source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force timeline frame to match the source frame
					#pragma omp critical (T_addLayer)
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are
				// added together, so set each gain carefully: if the sum exceeds 1.0, audio distortion will occur.
				#pragma omp critical (T_addLayer)
				new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);

			}
		else
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

	}

	// Skip out if only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();

		// Loop through pixels
		for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
		{
			// Multiply the pixel's alpha channel by the clip's alpha value
			pixels[byte_index + 3] *= alpha;
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
	}

	/* RESIZE SOURCE IMAGE - based on scale type */
	QSize source_size = source_image->size();
	switch (source_clip->scale)
	{
		case (SCALE_FIT):
			// keep aspect ratio
			source_size.scale(max_width, max_height, Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;

		case (SCALE_STRETCH):
			// ignore aspect ratio
			source_size.scale(max_width, max_height, Qt::IgnoreAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;

		case (SCALE_CROP):
			QSize width_size(max_width, round(max_width / (float(source_size.width()) / float(source_size.height()))));
			QSize height_size(round(max_height / (float(source_size.height()) / float(source_size.width()))), max_height);

			// respect aspect ratio
			if (width_size.width() >= max_width && width_size.height() >= max_height)
				source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
			else
				source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
			break;
	}

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
	float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
	float scaled_source_width = source_size.width() * sx;
	float scaled_source_height = source_size.height() * sy;

	switch (source_clip->gravity)
	{
		case (GRAVITY_TOP):
			x = (max_width - scaled_source_width) / 2.0; // center
			break;
		case (GRAVITY_TOP_RIGHT):
			x = max_width - scaled_source_width; // right
			break;
		case (GRAVITY_LEFT):
			y = (max_height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_CENTER):
			x = (max_width - scaled_source_width) / 2.0; // center
			y = (max_height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_RIGHT):
			x = max_width - scaled_source_width; // right
			y = (max_height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_BOTTOM_LEFT):
			y = (max_height - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM):
			x = (max_width - scaled_source_width) / 2.0; // center
			y = (max_height - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM_RIGHT):
			x = max_width - scaled_source_width; // right
			y = (max_height - scaled_source_height); // bottom
			break;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);

	/* LOCATION, ROTATION, AND SCALE */
	float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
	x += (max_width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
	y += (max_height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
	float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
	float shear_y = source_clip->shear_y.GetValue(clip_frame_number);

	bool transformed = false;
	QTransform transform;

	// Transform source image (if needed)
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

	if (!isEqual(r, 0)) {
		// ROTATE CLIP
		float origin_x = x + (scaled_source_width / 2.0);
		float origin_y = y + (scaled_source_height / 2.0);
		transform.translate(origin_x, origin_y);
		transform.rotate(r);
		transform.translate(-origin_x, -origin_y);
		transformed = true;
	}

	if (!isEqual(x, 0) || !isEqual(y, 0)) {
		// TRANSLATE/MOVE CLIP
		transform.translate(x, y);
		transformed = true;
	}

	// SCALE CLIP (if needed)
	float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
	float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;

	if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
		transform.scale(source_width_scale, source_height_scale);
		transformed = true;
	}

	if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
		// SHEAR HEIGHT/WIDTH
		transform.shear(shear_x, shear_y);
		transformed = true;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);

	/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
	std::shared_ptr<QImage> new_image;
	#pragma omp critical (T_addLayer)
	new_image = new_frame->GetImage();

	// Load timeline's new frame image into a QPainter
	QPainter painter(new_image.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale)... if any
	if (transformed)
		painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);

	// Draw frame #'s on top of image (if needed)
	if (source_clip->display != FRAME_DISPLAY_NONE) {
		stringstream frame_number_str;
		switch (source_clip->display)
		{
			case (FRAME_DISPLAY_CLIP):
				frame_number_str << clip_frame_number;
				break;

			case (FRAME_DISPLAY_TIMELINE):
				frame_number_str << timeline_frame_number;
				break;

			case (FRAME_DISPLAY_BOTH):
				frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
				break;
		}

		// Draw frame number on top of image
		painter.setPen(QColor("#ffffff"));
		painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
	}

	painter.end();

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
}
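
// Placement example (illustrative, 1920x1080 canvas): a source scaled to 960x540
// with GRAVITY_CENTER gets x = (1920 - 960) / 2 = 480 and y = (1080 - 540) / 2 = 270;
// a location_x of 0.1 then shifts it right by 1920 * 0.1 = 192 pixels before the
// QTransform above is applied.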

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

	// Is clip already in the list?
	bool clip_found = open_clips.count(clip);

	if (clip_found && !does_clip_intersect)
	{
		// Remove clip from 'opened' list, because it's closed now
		open_clips.erase(clip);

		// Close clip
		clip->Close();
	}
	else if (!clip_found && does_clip_intersect)
	{
		// Add clip to 'opened' list, because it's missing
		open_clips[clip] = clip;

		try {
			// Open the clip
			clip->Open();

		} catch (const InvalidFile & e) {
			// ...
		}
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

	// Sort clips
	clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
	// Sort effects
	effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
	ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

	// Close all open clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Open or Close this clip, based on if it's intersecting or not
		update_open_clips(clip, false);
	}

	// Mark timeline as closed
	is_open = false;

	// Clear cache
	final_cache->Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
	is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
	return fabs(a - b) < 0.000001;
}
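
// Example: isEqual(0.1 + 0.2, 0.3) is true (the doubles differ by ~5.6e-17),
// while isEqual(1.0, 1.00001) is false, since 1e-5 exceeds the 1e-6 tolerance.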

// Get an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
	// Adjust out of bounds frame number
	if (requested_frame < 1)
		requested_frame = 1;

	// Check cache
	std::shared_ptr<Frame> frame;
	#pragma omp critical (T_GetFrame)
	frame = final_cache->GetFrame(requested_frame);
	if (frame) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Return cached frame
		return frame;
	}
	else
	{
		// Create a scoped lock, allowing only a single thread to run the following code at one time
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

		// Check for open reader (or throw exception)
		if (!is_open)
			throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

		// Check cache again (due to locking)
		#pragma omp critical (T_GetFrame)
		frame = final_cache->GetFrame(requested_frame);
		if (frame) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

			// Return cached frame
			return frame;
		}

		// Minimum number of frames to process (for performance reasons)
		int minimum_frames = OPEN_MP_NUM_PROCESSORS;

		// Get a list of clips that intersect with the requested section of timeline
		// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
		vector<Clip*> nearby_clips;
		#pragma omp critical (T_GetFrame)
		nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

		omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
		// Allow nested OpenMP sections
		omp_set_nested(true);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);

		// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
		// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
		for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
		{
			// Loop through clips
			for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
			{
				// Get clip object from the list
				Clip *clip = nearby_clips[clip_index];
				long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
				long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

				bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
				if (does_clip_intersect)
				{
					// Get clip frame #
					long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
					long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

					// Cache clip object
					clip->GetFrame(clip_frame_number);
				}
			}
		}

		#pragma omp parallel
		{
			// Loop through all requested frames
			#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
			for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
			{
				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

				// Init some basic properties about this frame
				int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

				// Create blank frame (which will become the requested frame)
				std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels));
				#pragma omp critical (T_GetFrame)
				{
					new_frame->AddAudioSilence(samples_in_frame);
					new_frame->SampleRate(info.sample_rate);
					new_frame->ChannelsLayout(info.channel_layout);
				}

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Add Background Color to 1st layer (if animated or not black)
				if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
					(color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
					new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number));

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);

				// Find Clips near this time
				for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
				{
					// Get clip object from the list
					Clip *clip = nearby_clips[clip_index];
					long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
					long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

					bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

					// Debug output
					ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);

					// Clip is visible
					if (does_clip_intersect)
					{
						// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
						bool is_top_clip = true;
						float max_volume = 0.0;
						for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
						{
							Clip *nearby_clip = nearby_clips[top_clip_index];
							long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
							long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
							long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
							long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

							// Determine if top clip
							if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
								nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
								nearby_clip_start_position > clip_start_position && is_top_clip == true) {
								is_top_clip = false;
							}

							// Determine max volume of overlapping clips
							if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
								nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
								nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
								max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
							}
						}

						// Determine the frame needed for this clip (based on the position on the timeline)
						long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
						long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);

						// Add clip's frame as layer
						add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);

					} else
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);

				} // end clip loop

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

				// Set frame # on mapped frame
				#pragma omp ordered
				{
					new_frame->SetFrameNumber(frame_number);

					// Add final frame to cache
					final_cache->Add(new_frame);
				}

			} // end frame loop
		} // end parallel

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

		// Return frame (or blank frame)
		return final_cache->GetFrame(requested_frame);
	}
}
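
// Usage note (sketch): each cache miss renders OPEN_MP_NUM_PROCESSORS frames ahead
// into final_cache, so sequential playback is mostly cache hits:
//
//   timeline.Open();
//   for (int64_t n = 1; n <= 100; n++)
//       std::shared_ptr<Frame> f = timeline.GetFrame(n);
//   timeline.Close();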

// Find intersecting clips (or non intersecting clips)
vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
	// Find matching clips
	vector<Clip*> matching_clips;

	// Calculate the requested frame range
	float min_requested_frame = requested_frame;
	float max_requested_frame = requested_frame + (number_of_frames - 1);

	// Re-Sort Clips (since they likely changed)
	sort_clips();

	// Find Clips at this time
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *clip = (*clip_itr);

		// Does clip intersect the current requested time
		long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
		long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

		bool does_clip_intersect =
				(clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
				(clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);

		// Open (or schedule for closing) this clip, based on if it's intersecting or not
		#pragma omp critical (reader_lock)
		update_open_clips(clip, does_clip_intersect);

		// Clip is visible
		if (does_clip_intersect && include)
			// Add the intersecting clip
			matching_clips.push_back(clip);

		else if (!does_clip_intersect && !include)
			// Add the non-intersecting clip
			matching_clips.push_back(clip);

	} // end clip loop

	// Return list
	return matching_clips;
}

// Set the cache object used by this reader
void Timeline::SetCache(CacheBase* new_cache) {
	// Set new cache
	final_cache = new_cache;
}

// Generate JSON string of this object
string Timeline::Json() {

	// Return formatted string
	return JsonValue().toStyledString();
}

// Generate Json::JsonValue for this object
Json::Value Timeline::JsonValue() {

	// Create root json object
	Json::Value root = ReaderBase::JsonValue(); // get parent properties
	root["type"] = "Timeline";
	root["viewport_scale"] = viewport_scale.JsonValue();
	root["viewport_x"] = viewport_x.JsonValue();
	root["viewport_y"] = viewport_y.JsonValue();
	root["color"] = color.JsonValue();

	// Add array of clips
	root["clips"] = Json::Value(Json::arrayValue);

	// Loop through clips
	list<Clip*>::iterator clip_itr;
	for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
	{
		// Get clip object from the iterator
		Clip *existing_clip = (*clip_itr);
		root["clips"].append(existing_clip->JsonValue());
	}

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// Loop through effects
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get effect object from the iterator
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}

	// Return JsonValue
	return root;
}

// Load JSON string into this object
void Timeline::SetJson(string value) {

	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success)
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

	try
	{
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}
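
// Round-trip sketch (hypothetical 'timeline' and 'other' instances):
//
//   string state = timeline.Json();  // serialize clips, effects, color, and viewport
//   other.SetJson(state);            // rebuild the same clips/effects on another Timeline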

// Load Json::JsonValue into this object
void Timeline::SetJsonValue(Json::Value root) {

	// Close timeline before we do anything (this also removes all open and closing clips)
	bool was_open = is_open;
	Close();

	// Set parent data
	ReaderBase::SetJsonValue(root);

	if (!root["clips"].isNull()) {
		// Clear existing clips
		clips.clear();

		// Loop through clips
		for (int x = 0; x < root["clips"].size(); x++) {
			// Get each clip
			Json::Value existing_clip = root["clips"][x];

			// Create Clip
			Clip *c = new Clip();

			// Load Json into Clip
			c->SetJsonValue(existing_clip);

			// Add Clip to Timeline
			AddClip(c);
		}
	}

	if (!root["effects"].isNull()) {
		// Clear existing effects
		effects.clear();

		// Loop through effects
		for (int x = 0; x < root["effects"].size(); x++) {
			// Get each effect
			Json::Value existing_effect = root["effects"][x];

			// Create Effect
			EffectBase *e = NULL;

			if (!existing_effect["type"].isNull()) {
				// Create instance of effect
				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {

					// Load Json into Effect
					e->SetJsonValue(existing_effect);

					// Add Effect to Timeline
					AddEffect(e);
				}
			}
		}
	}

	if (!root["duration"].isNull()) {
		// Update duration of timeline
		info.duration = root["duration"].asDouble();
	}

	// Re-open if needed
	if (was_open)
		Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(string value) {

	// Get lock (prevent getting frames while this happens)
	const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

	// Parse JSON string into JSON objects
	Json::Value root;
	Json::Reader reader;
	bool success = reader.parse( value, root );
	if (!success || !root.isArray())
		// Raise exception
		throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

	try
	{
		// Process the JSON change array, loop through each item
		for (int x = 0; x < root.size(); x++) {
			// Get each change
			Json::Value change = root[x];
			string root_key = change["key"][(uint)0].asString();

			// Process each type of change
			if (root_key == "clips")
				// Apply to CLIPS
				apply_json_to_clips(change);

			else if (root_key == "effects")
				// Apply to EFFECTS
				apply_json_to_effects(change);

			else
				// Apply to TIMELINE
				apply_json_to_timeline(change);

		}
	}
	catch (const exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
	}
}
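
// Example change array (illustrative ids/values; the "key" path selects the target):
//
//   [{"type": "update",
//     "key": ["clips", {"id": "CLIP_1"}],
//     "value": {"id": "CLIP_1", "position": 5.0, "start": 0.0, "end": 10.0}}]
//
// A root key of "clips" routes to apply_json_to_clips(), "effects" to
// apply_json_to_effects(), and anything else to apply_json_to_timeline().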

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string clip_id = "";
	Clip *existing_clip = NULL;

	// Find id of clip (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull()) {
				// Set the id
				clip_id = key_part["id"].asString();

				// Find matching clip in timeline (if any)
				list<Clip*>::iterator clip_itr;
				for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
				{
					// Get clip object from the iterator
					Clip *c = (*clip_itr);
					if (c->Id() == clip_id) {
						existing_clip = c;
						break; // clip found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Check for a more specific key (targeting this clip's effects)
	// For example: ["clips", {"id": "123"}, "effects", {"id": "432"}]
	if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
	{
		// This change is actually targeting a specific effect under a clip (and not the clip)
		Json::Value key_part = change["key"][3];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*> effect_list = existing_clip->Effects();
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						// Apply the change to the effect directly
						apply_json_to_effects(change, e);

						// Calculate start and end frames that this impacts, and remove those frames from the cache
						int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
						int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
						final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

						return; // effect found, don't update clip
					}
				}
			}
		}
	}

	// Calculate start and end frames that this impacts, and remove those frames from the cache
	if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
		int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
		int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
		final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
	}

	// Determine type of change operation
	if (change_type == "insert") {

		// Create new clip
		Clip *clip = new Clip();
		clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
		AddClip(clip); // Add clip to timeline

		// Apply framemapper (or update existing framemapper)
		apply_mapper_to_clip(clip);

	} else if (change_type == "update") {

		// Update existing clip
		if (existing_clip) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove cache on clip's Reader (if found)
			if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
				existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Update clip properties from JSON
			existing_clip->SetJsonValue(change["value"]);

			// Apply framemapper (or update existing framemapper)
			apply_mapper_to_clip(existing_clip);

			// Clear any cached image sizes (since size might have changed)
			existing_clip->SetMaxSize(0, 0); // force clearing of cached image size
			if (existing_clip->Reader()) {
				existing_clip->Reader()->SetMaxSize(0, 0);
				if (existing_clip->Reader()->Name() == "FrameMapper") {
					FrameMapper *nested_reader = (FrameMapper *) existing_clip->Reader();
					if (nested_reader->Reader())
						nested_reader->Reader()->SetMaxSize(0, 0);
				}
			}
		}

	} else if (change_type == "delete") {

		// Remove existing clip
		if (existing_clip) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove clip from timeline
			RemoveClip(existing_clip);
		}

	}

}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	EffectBase *existing_effect = NULL;

	// Find id of an effect (if any)
	for (int x = 0; x < change["key"].size(); x++) {
		// Get each change
		Json::Value key_part = change["key"][x];

		if (key_part.isObject()) {
			// Check for id
			if (!key_part["id"].isNull())
			{
				// Set the id
				string effect_id = key_part["id"].asString();

				// Find matching effect in timeline (if any)
				list<EffectBase*>::iterator effect_itr;
				for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
				{
					// Get effect object from the iterator
					EffectBase *e = (*effect_itr);
					if (e->Id() == effect_id) {
						existing_effect = e;
						break; // effect found, exit loop
					}
				}
				break; // id found, exit loop
			}
		}
	}

	// Now that we found the effect, apply the change to it
	if (existing_effect || change_type == "insert")
		// Apply change to effect
		apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {

	// Get key and type of change
	string change_type = change["type"].asString();

	// Calculate start and end frames that this impacts, and remove those frames from the cache
	if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
		int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
		int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
		final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
	}

	// Determine type of change operation
	if (change_type == "insert") {

		// Determine type of effect
		string effect_type = change["value"]["type"].asString();

		// Create Effect
		EffectBase *e = NULL;

		// Init the matching effect object
		if (e = EffectInfo().CreateEffect(effect_type)) {

			// Load Json into Effect
			e->SetJsonValue(change["value"]);

			// Add Effect to Timeline
			AddEffect(e);
		}

	} else if (change_type == "update") {

		// Update existing effect
		if (existing_effect) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Update effect properties from JSON
			existing_effect->SetJsonValue(change["value"]);
		}

	} else if (change_type == "delete") {

		// Remove existing effect
		if (existing_effect) {

			// Calculate start and end frames that this impacts, and remove those frames from the cache
			int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
			int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
			final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

			// Remove effect from timeline
			RemoveEffect(existing_effect);
		}

	}
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) {

	// Get key and type of change
	string change_type = change["type"].asString();
	string root_key = change["key"][(uint)0].asString();
	string sub_key = "";
	if (change["key"].size() >= 2)
		sub_key = change["key"][(uint)1].asString();

	// Clear entire cache
	final_cache->Clear();

	// Determine type of change operation
	if (change_type == "insert" || change_type == "update") {

		// INSERT / UPDATE
		// Check for valid property
		if (root_key == "color")
			// Set color
			color.SetJsonValue(change["value"]);
		else if (root_key == "viewport_scale")
			// Set viewport scale
			viewport_scale.SetJsonValue(change["value"]);
		else if (root_key == "viewport_x")
			// Set viewport x offset
			viewport_x.SetJsonValue(change["value"]);
		else if (root_key == "viewport_y")
			// Set viewport y offset
			viewport_y.SetJsonValue(change["value"]);
		else if (root_key == "duration") {
			// Update duration of timeline
			info.duration = change["value"].asDouble();
		}
		else if (root_key == "width")
			// Set width
			info.width = change["value"].asInt();
		else if (root_key == "height")
			// Set height
			info.height = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
			// Set fps fraction
			if (!change["value"]["num"].isNull())
				info.fps.num = change["value"]["num"].asInt();
			if (!change["value"]["den"].isNull())
				info.fps.den = change["value"]["den"].asInt();
		}
		else if (root_key == "fps" && sub_key == "num")
			// Set fps.num
			info.fps.num = change["value"].asInt();
		else if (root_key == "fps" && sub_key == "den")
			// Set fps.den
			info.fps.den = change["value"].asInt();
		else if (root_key == "sample_rate")
			// Set sample rate
			info.sample_rate = change["value"].asInt();
		else if (root_key == "channels")
			// Set channels
			info.channels = change["value"].asInt();
		else if (root_key == "channel_layout")
			// Set channel layout
			info.channel_layout = (ChannelLayout) change["value"].asInt();
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	} else if (change["type"].asString() == "delete") {

		// DELETE / RESET
		// Reset the following properties (since we can't delete them)
		if (root_key == "color") {
			color = Color();
			color.red = Keyframe(0.0);
			color.green = Keyframe(0.0);
			color.blue = Keyframe(0.0);
		}
		else if (root_key == "viewport_scale")
			viewport_scale = Keyframe(1.0);
		else if (root_key == "viewport_x")
			viewport_x = Keyframe(0.0);
		else if (root_key == "viewport_y")
			viewport_y = Keyframe(0.0);
		else
			// Error parsing JSON (or missing keys)
			throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

	}

}
1413 
1414 // Clear all caches
1416 
1417  // Get lock (prevent getting frames while this happens)
1418  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1419 
1420  // Clear primary cache
1421  final_cache->Clear();
1422 
1423  // Loop through all clips
1424  list<Clip*>::iterator clip_itr;
1425  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1426  {
1427  // Get clip object from the iterator
1428  Clip *clip = (*clip_itr);
1429 
1430  // Clear cache on clip
1431  clip->Reader()->GetCache()->Clear();
1432 
1433  // Clear nested Reader (if any)
1434  if (clip->Reader()->Name() == "FrameMapper") {
1435  FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
1436  if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
1437  nested_reader->Reader()->GetCache()->Clear();
1438  }
1439 
1440  }
1441 }
Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:237
Align clip to the top right of its parent.
Definition: Enums.h:39
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:81
Align clip to the bottom left of its parent.
Definition: Enums.h:43
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:842
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:362
Exception for missing JSON Change key.
Definition: Exceptions.h:182
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:225
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:226
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ReaderBase.h:144
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:62
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:109
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
Keyframe blue
Curve representing the red value (0 - 255)
Definition: Color.h:47
virtual std::shared_ptr< Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:247
bool Waveform()
Waveform property.
Definition: Clip.h:219
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ClipBase.h:97
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:74
ScaleType scale
The scale determines how a clip should be resized to fit it's parent.
Definition: Clip.h:155
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
Align clip to the bottom center of its parent.
Definition: Enums.h:44
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
Exception for files that can not be found or opened.
Definition: Exceptions.h:132
string Id()
Get basic properties.
Definition: ClipBase.h:82
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:259
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
Definition: Timeline.cpp:1415
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:83
static CrashHandler * Instance()
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
Definition: Timeline.cpp:135
void Reader(ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:210
list< EffectBase * > Effects()
Return the list of effects on the timeline.
Definition: Clip.h:182
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:157
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Definition: Timeline.cpp:646
This class represents a fraction.
Definition: Fraction.h:42
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:45
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:260
virtual void Add(std::shared_ptr< Frame > frame)=0
Add a Frame to the cache.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:71
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:113
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:121
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:611
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:230
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:168
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip's internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:112
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:248
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:76
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:230
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%) ...
Definition: Enums.h:78
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:634
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:1033
double GetValue(int64_t index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:102
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:96
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:229
virtual CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:250
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:223
VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:158
Color color
Background color of timeline canvas.
Definition: Timeline.h:254
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
Definition: Timeline.cpp:33
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
void SetJson(string value)
Load JSON string into this object.
Definition: Timeline.cpp:940
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:86
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ReaderBase.h:103
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:521
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:761
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:48
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:46
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254