OpenShot Library | libopenshot  0.2.3
Clip.cpp
Go to the documentation of this file.
1 /**
2  * @file
3  * @brief Source file for Clip class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
#include "../include/Clip.h"
#include "../include/FFmpegReader.h"
#include "../include/FrameMapper.h"
#ifdef USE_IMAGEMAGICK
	#include "../include/ImageReader.h"
	#include "../include/TextReader.h"
#endif
#include "../include/QtImageReader.h"
#include "../include/ChunkReader.h"
#include "../include/DummyReader.h"

#include <algorithm>
38 
39 using namespace openshot;
40 
41 // Init default settings for a clip
42 void Clip::init_settings()
43 {
44  // Init clip settings
45  Position(0.0);
46  Layer(0);
47  Start(0.0);
48  End(0.0);
50  scale = SCALE_FIT;
54  waveform = false;
56 
57  // Init scale curves
58  scale_x = Keyframe(1.0);
59  scale_y = Keyframe(1.0);
60 
61  // Init location curves
62  location_x = Keyframe(0.0);
63  location_y = Keyframe(0.0);
64 
65  // Init alpha
66  alpha = Keyframe(1.0);
67 
68  // Init rotation
69  init_reader_rotation();
70 
71  // Init time & volume
72  time = Keyframe(1.0);
73  volume = Keyframe(1.0);
74 
75  // Init audio waveform color
76  wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
77 
78  // Init crop settings
80  crop_width = Keyframe(-1.0);
81  crop_height = Keyframe(-1.0);
82  crop_x = Keyframe(0.0);
83  crop_y = Keyframe(0.0);
84 
85  // Init shear and perspective curves
86  shear_x = Keyframe(0.0);
87  shear_y = Keyframe(0.0);
88  perspective_c1_x = Keyframe(-1.0);
89  perspective_c1_y = Keyframe(-1.0);
90  perspective_c2_x = Keyframe(-1.0);
91  perspective_c2_y = Keyframe(-1.0);
92  perspective_c3_x = Keyframe(-1.0);
93  perspective_c3_y = Keyframe(-1.0);
94  perspective_c4_x = Keyframe(-1.0);
95  perspective_c4_y = Keyframe(-1.0);
96 
97  // Init audio channel filter and mappings
98  channel_filter = Keyframe(-1.0);
99  channel_mapping = Keyframe(-1.0);
100 
101  // Init audio and video overrides
102  has_audio = Keyframe(-1.0);
103  has_video = Keyframe(-1.0);
104 
105  // Default pointers
106  manage_reader = false;
107 }
108 
109 // Init reader's rotation (if any)
110 void Clip::init_reader_rotation() {
111  // Only init rotation from reader when needed
112  if (rotation.Points.size() > 1)
113  // Do nothing if more than 1 rotation Point
114  return;
115  else if (rotation.Points.size() == 1 && rotation.GetValue(1) != 0.0)
116  // Do nothing if 1 Point, and it's not the default value
117  return;
118 
119  // Init rotation
120  if (reader && reader->info.metadata.count("rotate") > 0) {
121  // Use reader metadata rotation (if any)
122  // This is typical with cell phone videos filmed in different orientations
123  try {
124  float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
125  rotation = Keyframe(rotate_metadata);
126  } catch (exception e) {}
127  }
128  else
129  // Default no rotation
130  rotation = Keyframe(0.0);
131 }
132 
133 // Default Constructor for a clip
134 Clip::Clip() : reader(NULL), resampler(NULL), audio_cache(NULL)
135 {
136  // Init all default settings
137  init_settings();
138 }
139 
140 // Constructor with reader
141 Clip::Clip(ReaderBase* new_reader) : reader(new_reader), resampler(NULL), audio_cache(NULL)
142 {
143  // Init all default settings
144  init_settings();
145 
146  // Open and Close the reader (to set the duration of the clip)
147  Open();
148  Close();
149 
150  // Update duration
151  End(reader->info.duration);
152 }
153 
154 // Constructor with filepath
155 Clip::Clip(string path) : reader(NULL), resampler(NULL), audio_cache(NULL)
156 {
157  // Init all default settings
158  init_settings();
159 
160  // Get file extension (and convert to lower case)
161  string ext = get_file_extension(path);
162  transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
163 
164  // Determine if common video formats
165  if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
166  ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
167  {
168  try
169  {
170  // Open common video format
171  reader = new FFmpegReader(path);
172 
173  } catch(...) { }
174  }
175 
176  // If no video found, try each reader
177  if (!reader)
178  {
179  try
180  {
181  // Try an image reader
182  reader = new QtImageReader(path);
183 
184  } catch(...) {
185  try
186  {
187  // Try a video reader
188  reader = new FFmpegReader(path);
189 
190  } catch(...) { }
191  }
192  }
193 
194  // Update duration
195  if (reader) {
196  End(reader->info.duration);
197  manage_reader = true;
198  init_reader_rotation();
199  }
200 }
201 
202 // Destructor
204 {
205  // Delete the reader if clip created it
206  if (manage_reader && reader) {
207  delete reader;
208  reader = NULL;
209  }
210 
211  // Close the resampler
212  if (resampler) {
213  delete resampler;
214  resampler = NULL;
215  }
216 }
217 
218 /// Set the current reader
219 void Clip::Reader(ReaderBase* new_reader)
220 {
221  // set reader pointer
222  reader = new_reader;
223 
224  // set parent
225  reader->SetClip(this);
226 
227  // Init rotation (if any)
228  init_reader_rotation();
229 }
230 
231 /// Get the current reader
233 {
234  if (reader)
235  return reader;
236  else
237  // Throw error if reader not initialized
238  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
239 }
240 
241 // Open the internal reader
243 {
244  if (reader)
245  {
246  // Open the reader
247  reader->Open();
248 
249  // Set some clip properties from the file reader
250  if (end == 0.0)
251  End(reader->info.duration);
252  }
253  else
254  // Throw error if reader not initialized
255  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
256 }
257 
258 // Close the internal reader
260 {
261  if (reader) {
262  ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
263 
264  // Close the reader
265  reader->Close();
266  }
267  else
268  // Throw error if reader not initialized
269  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
270 }
271 
272 // Get end position of clip (trim end of video), which can be affected by the time curve.
273 float Clip::End()
274 {
275  // if a time curve is present, use it's length
276  if (time.Points.size() > 1)
277  {
278  // Determine the FPS fo this clip
279  float fps = 24.0;
280  if (reader)
281  // file reader
282  fps = reader->info.fps.ToFloat();
283  else
284  // Throw error if reader not initialized
285  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
286 
287  return float(time.GetLength()) / fps;
288  }
289  else
290  // just use the duration (as detected by the reader)
291  return end;
292 }
293 
294 // Get an openshot::Frame object for a specific frame number of this reader.
295 std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame)
296 {
297  if (reader)
298  {
299  // Adjust out of bounds frame number
300  requested_frame = adjust_frame_number_minimum(requested_frame);
301 
302  // Adjust has_video and has_audio overrides
303  int enabled_audio = has_audio.GetInt(requested_frame);
304  if (enabled_audio == -1 && reader && reader->info.has_audio)
305  enabled_audio = 1;
306  else if (enabled_audio == -1 && reader && !reader->info.has_audio)
307  enabled_audio = 0;
308  int enabled_video = has_video.GetInt(requested_frame);
309  if (enabled_video == -1 && reader && reader->info.has_video)
310  enabled_video = 1;
311  else if (enabled_video == -1 && reader && !reader->info.has_audio)
312  enabled_video = 0;
313 
314  // Is a time map detected
315  int64_t new_frame_number = requested_frame;
316  int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
317  if (time.Values.size() > 1)
318  new_frame_number = time_mapped_number;
319 
320  // Now that we have re-mapped what frame number is needed, go and get the frame pointer
321  std::shared_ptr<Frame> original_frame;
322  #pragma omp critical (Clip_GetFrame)
323  original_frame = GetOrCreateFrame(new_frame_number);
324 
325  // Create a new frame
326  std::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
327  #pragma omp critical (Clip_GetFrame)
328  {
329  frame->SampleRate(original_frame->SampleRate());
330  frame->ChannelsLayout(original_frame->ChannelsLayout());
331  }
332 
333  // Copy the image from the odd field
334  if (enabled_video)
335  frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
336 
337  // Loop through each channel, add audio
338  if (enabled_audio && reader->info.has_audio)
339  for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
340  frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
341 
342  // Get time mapped frame number (used to increase speed, change direction, etc...)
343  get_time_mapped_frame(frame, requested_frame);
344 
345  // Apply effects to the frame (if any)
346  apply_effects(frame);
347 
348  // Return processed 'frame'
349  return frame;
350  }
351  else
352  // Throw error if reader not initialized
353  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
354 }
355 
356 // Get file extension
357 string Clip::get_file_extension(string path)
358 {
359  // return last part of path
360  return path.substr(path.find_last_of(".") + 1);
361 }
362 
363 // Reverse an audio buffer
364 void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
365 {
366  int number_of_samples = buffer->getNumSamples();
367  int channels = buffer->getNumChannels();
368 
369  // Reverse array (create new buffer to hold the reversed version)
370  AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
371  reversed->clear();
372 
373  for (int channel = 0; channel < channels; channel++)
374  {
375  int n=0;
376  for (int s = number_of_samples - 1; s >= 0; s--, n++)
377  reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
378  }
379 
380  // Copy the samples back to the original array
381  buffer->clear();
382  // Loop through channels, and get audio samples
383  for (int channel = 0; channel < channels; channel++)
384  // Get the audio samples for this channel
385  buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
386 
387  delete reversed;
388  reversed = NULL;
389 }
390 
// Adjust the audio and image of a time mapped frame.
//
// When a time curve with more than one point is present, this re-times the
// audio of 'frame' to match the curve: slowing down stretches (resamples) a
// single source frame's audio, speeding up concatenates several source frames'
// audio and compresses it, and a direction change reverses the samples.
// 'frame_number' is the clip-relative (requested) frame; frame->number is the
// already time-mapped source frame number.
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	// Check for a valid time map curve (a single point means no re-timing)
	if (time.Values.size() > 1)
	{
		// Serialize access: the shared resampler below is stateful
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

		// create buffer and resampler (resampler is allocated once and reused)
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number (the time-mapped source frame)
		// NOTE(review): narrows int64_t frame->number to int — TODO confirm intended
		int new_frame_number = frame->number;

		// Get delta (difference from the previous Y value of the time curve;
		// |delta| > 1 means the curve skips frames, i.e. playback is sped up)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		int sample_rate = reader->info.sample_rate;
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO: the same source frame repeats den times,
				// so stretch its audio by den and emit the num-th slice.
				// Resample data, and return new buffer pointer
				AudioSampleBuffer *resampled_buffer = NULL;
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
									 number_of_samples, 1.0f);

				// Reverse the samples (if playing backwards at this point of the curve)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();

				// Just take the samples we need for the requested frame
				// (num selects which slice of the stretched audio this repeat gets)
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
									number_of_samples, 1.0f);

				// Clean up (the resampler owns the resampled buffer; do not delete)
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				// SPEEDING UP: gather audio from all |delta| skipped source frames,
				// then compress it down to one frame's worth of samples.
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at offset 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// Negative delta: same gathering, but iterating source frames downward
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at offset 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the delta of the repeat fraction)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				int resampled_buffer_size = buffer->getNumSamples();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples, to the frame object
					frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up (the resampler owns this buffer; do not delete)
				buffer = NULL;
			}
			else {
				// Normal speed: use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);


			}

			delete samples;
			samples = NULL;
		}
	}
}
597 
598 // Adjust frame number minimum value
599 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
600 {
601  // Never return a frame number 0 or below
602  if (frame_number < 1)
603  return 1;
604  else
605  return frame_number;
606 
607 }
608 
609 // Get or generate a blank frame
610 std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
611 {
612  std::shared_ptr<Frame> new_frame;
613 
614  // Init some basic properties about this frame
615  int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);
616 
617  try {
618  // Debug output
619  ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
620 
621  // Attempt to get a frame (but this could fail if a reader has just been closed)
622  new_frame = reader->GetFrame(number);
623 
624  // Return real frame
625  if (new_frame)
626  return new_frame;
627 
628  } catch (const ReaderClosed & e) {
629  // ...
630  } catch (const TooManySeeks & e) {
631  // ...
632  } catch (const OutOfBoundsFrame & e) {
633  // ...
634  }
635 
636  // Debug output
637  ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
638 
639  // Create blank frame
640  new_frame = std::make_shared<Frame>(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels);
641  new_frame->SampleRate(reader->info.sample_rate);
642  new_frame->ChannelsLayout(reader->info.channel_layout);
643  new_frame->AddAudioSilence(samples_in_frame);
644  return new_frame;
645 }
646 
647 // Generate JSON string of this object
648 string Clip::Json() {
649 
650  // Return formatted string
651  return JsonValue().toStyledString();
652 }
653 
// Get all properties for a specific frame, as a styled JSON string.
// Used by UI property editors: each entry carries the current (possibly
// keyframed) value at 'requested_frame' plus its type, range, and whether it
// is read-only; enum-style properties also carry a "choices" dropdown list.
string Clip::PropertiesJSON(int64_t requested_frame) {

	// Generate JSON properties list
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes (animated properties, evaluated at requested_frame)
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Waveform color (a keyframed color, exposed as nested red/green/blue channels)
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);


	// Return formatted string
	return root.toStyledString();
}
728 
729 // Generate Json::JsonValue for this object
730 Json::Value Clip::JsonValue() {
731 
732  // Create root json object
733  Json::Value root = ClipBase::JsonValue(); // get parent properties
734  root["gravity"] = gravity;
735  root["scale"] = scale;
736  root["anchor"] = anchor;
737  root["display"] = display;
738  root["mixing"] = mixing;
739  root["waveform"] = waveform;
740  root["scale_x"] = scale_x.JsonValue();
741  root["scale_y"] = scale_y.JsonValue();
742  root["location_x"] = location_x.JsonValue();
743  root["location_y"] = location_y.JsonValue();
744  root["alpha"] = alpha.JsonValue();
745  root["rotation"] = rotation.JsonValue();
746  root["time"] = time.JsonValue();
747  root["volume"] = volume.JsonValue();
748  root["wave_color"] = wave_color.JsonValue();
749  root["crop_width"] = crop_width.JsonValue();
750  root["crop_height"] = crop_height.JsonValue();
751  root["crop_x"] = crop_x.JsonValue();
752  root["crop_y"] = crop_y.JsonValue();
753  root["shear_x"] = shear_x.JsonValue();
754  root["shear_y"] = shear_y.JsonValue();
755  root["channel_filter"] = channel_filter.JsonValue();
756  root["channel_mapping"] = channel_mapping.JsonValue();
757  root["has_audio"] = has_audio.JsonValue();
758  root["has_video"] = has_video.JsonValue();
759  root["perspective_c1_x"] = perspective_c1_x.JsonValue();
760  root["perspective_c1_y"] = perspective_c1_y.JsonValue();
761  root["perspective_c2_x"] = perspective_c2_x.JsonValue();
762  root["perspective_c2_y"] = perspective_c2_y.JsonValue();
763  root["perspective_c3_x"] = perspective_c3_x.JsonValue();
764  root["perspective_c3_y"] = perspective_c3_y.JsonValue();
765  root["perspective_c4_x"] = perspective_c4_x.JsonValue();
766  root["perspective_c4_y"] = perspective_c4_y.JsonValue();
767 
768  // Add array of effects
769  root["effects"] = Json::Value(Json::arrayValue);
770 
771  // loop through effects
772  list<EffectBase*>::iterator effect_itr;
773  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
774  {
775  // Get clip object from the iterator
776  EffectBase *existing_effect = (*effect_itr);
777  root["effects"].append(existing_effect->JsonValue());
778  }
779 
780  if (reader)
781  root["reader"] = reader->JsonValue();
782 
783  // return JsonValue
784  return root;
785 }
786 
787 // Load JSON string into this object
788 void Clip::SetJson(string value) {
789 
790  // Parse JSON string into JSON objects
791  Json::Value root;
792  Json::Reader reader;
793  bool success = reader.parse( value, root );
794  if (!success)
795  // Raise exception
796  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
797 
798  try
799  {
800  // Set all values that match
801  SetJsonValue(root);
802  }
803  catch (exception e)
804  {
805  // Error parsing JSON (or missing keys)
806  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
807  }
808 }
809 
810 // Load Json::JsonValue into this object
811 void Clip::SetJsonValue(Json::Value root) {
812 
813  // Set parent data
815 
816  // Set data from Json (if key is found)
817  if (!root["gravity"].isNull())
818  gravity = (GravityType) root["gravity"].asInt();
819  if (!root["scale"].isNull())
820  scale = (ScaleType) root["scale"].asInt();
821  if (!root["anchor"].isNull())
822  anchor = (AnchorType) root["anchor"].asInt();
823  if (!root["display"].isNull())
824  display = (FrameDisplayType) root["display"].asInt();
825  if (!root["mixing"].isNull())
826  mixing = (VolumeMixType) root["mixing"].asInt();
827  if (!root["waveform"].isNull())
828  waveform = root["waveform"].asBool();
829  if (!root["scale_x"].isNull())
830  scale_x.SetJsonValue(root["scale_x"]);
831  if (!root["scale_y"].isNull())
832  scale_y.SetJsonValue(root["scale_y"]);
833  if (!root["location_x"].isNull())
834  location_x.SetJsonValue(root["location_x"]);
835  if (!root["location_y"].isNull())
836  location_y.SetJsonValue(root["location_y"]);
837  if (!root["alpha"].isNull())
838  alpha.SetJsonValue(root["alpha"]);
839  if (!root["rotation"].isNull())
840  rotation.SetJsonValue(root["rotation"]);
841  if (!root["time"].isNull())
842  time.SetJsonValue(root["time"]);
843  if (!root["volume"].isNull())
844  volume.SetJsonValue(root["volume"]);
845  if (!root["wave_color"].isNull())
846  wave_color.SetJsonValue(root["wave_color"]);
847  if (!root["crop_width"].isNull())
848  crop_width.SetJsonValue(root["crop_width"]);
849  if (!root["crop_height"].isNull())
850  crop_height.SetJsonValue(root["crop_height"]);
851  if (!root["crop_x"].isNull())
852  crop_x.SetJsonValue(root["crop_x"]);
853  if (!root["crop_y"].isNull())
854  crop_y.SetJsonValue(root["crop_y"]);
855  if (!root["shear_x"].isNull())
856  shear_x.SetJsonValue(root["shear_x"]);
857  if (!root["shear_y"].isNull())
858  shear_y.SetJsonValue(root["shear_y"]);
859  if (!root["channel_filter"].isNull())
860  channel_filter.SetJsonValue(root["channel_filter"]);
861  if (!root["channel_mapping"].isNull())
862  channel_mapping.SetJsonValue(root["channel_mapping"]);
863  if (!root["has_audio"].isNull())
864  has_audio.SetJsonValue(root["has_audio"]);
865  if (!root["has_video"].isNull())
866  has_video.SetJsonValue(root["has_video"]);
867  if (!root["perspective_c1_x"].isNull())
868  perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
869  if (!root["perspective_c1_y"].isNull())
870  perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
871  if (!root["perspective_c2_x"].isNull())
872  perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
873  if (!root["perspective_c2_y"].isNull())
874  perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
875  if (!root["perspective_c3_x"].isNull())
876  perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
877  if (!root["perspective_c3_y"].isNull())
878  perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
879  if (!root["perspective_c4_x"].isNull())
880  perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
881  if (!root["perspective_c4_y"].isNull())
882  perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
883  if (!root["effects"].isNull()) {
884 
885  // Clear existing effects
886  effects.clear();
887 
888  // loop through effects
889  for (int x = 0; x < root["effects"].size(); x++) {
890  // Get each effect
891  Json::Value existing_effect = root["effects"][x];
892 
893  // Create Effect
894  EffectBase *e = NULL;
895 
896  if (!existing_effect["type"].isNull()) {
897  // Create instance of effect
898  if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {
899 
900  // Load Json into Effect
901  e->SetJsonValue(existing_effect);
902 
903  // Add Effect to Timeline
904  AddEffect(e);
905  }
906  }
907  }
908  }
909  if (!root["reader"].isNull()) // does Json contain a reader?
910  {
911  if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
912  {
913  // Close previous reader (if any)
914  bool already_open = false;
915  if (reader)
916  {
917  // Track if reader was open
918  already_open = reader->IsOpen();
919 
920  // Close and delete existing reader (if any)
921  reader->Close();
922  delete reader;
923  reader = NULL;
924  }
925 
926  // Create new reader (and load properties)
927  string type = root["reader"]["type"].asString();
928 
929  if (type == "FFmpegReader") {
930 
931  // Create new reader
932  reader = new FFmpegReader(root["reader"]["path"].asString(), false);
933  reader->SetJsonValue(root["reader"]);
934 
935  } else if (type == "QtImageReader") {
936 
937  // Create new reader
938  reader = new QtImageReader(root["reader"]["path"].asString(), false);
939  reader->SetJsonValue(root["reader"]);
940 
941 #ifdef USE_IMAGEMAGICK
942  } else if (type == "ImageReader") {
943 
944  // Create new reader
945  reader = new ImageReader(root["reader"]["path"].asString(), false);
946  reader->SetJsonValue(root["reader"]);
947 
948  } else if (type == "TextReader") {
949 
950  // Create new reader
951  reader = new TextReader();
952  reader->SetJsonValue(root["reader"]);
953 #endif
954 
955  } else if (type == "ChunkReader") {
956 
957  // Create new reader
958  reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
959  reader->SetJsonValue(root["reader"]);
960 
961  } else if (type == "DummyReader") {
962 
963  // Create new reader
964  reader = new DummyReader();
965  reader->SetJsonValue(root["reader"]);
966  }
967 
968  // mark as managed reader and set parent
969  if (reader) {
970  reader->SetClip(this);
971  manage_reader = true;
972  }
973 
974  // Re-Open reader (if needed)
975  if (already_open)
976  reader->Open();
977 
978  }
979  }
980 }
981 
982 // Sort effects by order
983 void Clip::sort_effects()
984 {
985  // sort clips
986  effects.sort(CompareClipEffects());
987 }
988 
989 // Add an effect to the clip
991 {
992  // Add effect to list
993  effects.push_back(effect);
994 
995  // Sort effects
996  sort_effects();
997 }
998 
999 // Remove an effect from the clip
1001 {
1002  effects.remove(effect);
1003 }
1004 
1005 // Apply effects to the source frame (if any)
1006 std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
1007 {
1008  // Find Effects at this position and layer
1009  list<EffectBase*>::iterator effect_itr;
1010  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1011  {
1012  // Get clip object from the iterator
1013  EffectBase *effect = (*effect_itr);
1014 
1015  // Apply the effect to this frame
1016  frame = effect->GetFrame(frame, frame->number);
1017 
1018  } // end effect loop
1019 
1020  // Return modified frame
1021  return frame;
1022 }
vector< Coordinate > Values
Vector of all Values (i.e. the processed coordinates from the curve)
Definition: KeyFrame.h:93
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:102
Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:244
Display the timeline's frame number.
Definition: Enums.h:69
void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
void Close()
Close the internal reader.
Definition: Clip.cpp:259
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Color.cpp:92
int num
Numerator for the fraction.
Definition: Fraction.h:44
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:215
Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:246
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
EffectBase * CreateEffect(string effect_type)
Definition: EffectInfo.cpp:42
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:42
Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:240
Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:46
Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:242
Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:234
string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:60
float End()
Override End() method.
Definition: Clip.cpp:273
Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:245
Align clip to the bottom right of its parent.
Definition: Enums.h:45
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: KeyFrame.cpp:352
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:84
GravityType gravity
The gravity of a clip determines where it snaps to it's parent.
Definition: Clip.h:145
int width
The width of the video (in pixels)
Definition: ReaderBase.h:68
Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:225
Do not scale the clip.
Definition: Enums.h:54
This class represents a single frame of video (i.e. image & audio data)
Definition: Frame.h:115
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, int64_t frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
This class is used as a simple, dummy reader, which always returns a blank frame. ...
Definition: DummyReader.h:53
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:41
Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:45
float duration
Length of time (in seconds)
Definition: ReaderBase.h:65
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:74
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Definition: Enums.h:51
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%...
Definition: Enums.h:77
Keyframe time
Curve representing the frames over time to play (used for speed and direction of video) ...
Definition: Clip.h:224
Json::Value add_property_json(string name, float value, string type, string memo, Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame)
Generate JSON for a property.
Definition: ClipBase.cpp:65
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:49
void AddEffect(EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:990
virtual void Close()=0
Close the reader (and any resources it was consuming)
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:97
int Layer()
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:82
void SetClip(ClipBase *clip)
Set parent clip object of this reader.
Definition: ReaderBase.cpp:257
Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Definition: Clip.h:254
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:234
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:62
~Clip()
Destructor.
Definition: Clip.cpp:203
Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) ...
Definition: Clip.h:255
Do not display the frame number.
Definition: Enums.h:67
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:295
Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:228
Align clip to the top right of its parent.
Definition: Enums.h:39
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:81
Align clip to the bottom left of its parent.
Definition: Enums.h:43
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:811
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:393
Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:232
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:76
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:67
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:216
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:217
virtual std::shared_ptr< Frame > GetFrame(int64_t number)=0
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:63
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:94
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ClipBase.cpp:49
Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:241
Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:47
Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:235
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:238
ScaleType scale
The scale determines how a clip should be resized to fit it's parent.
Definition: Clip.h:146
int height
The height of the video (in pixels)
Definition: ReaderBase.h:67
Align clip to the bottom center of its parent.
Definition: Enums.h:44
Align clip to the top left of its parent.
Definition: Enums.h:37
Json::Value add_property_choice_json(string name, int value, int selected_value)
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:101
string Id()
Get basic properties.
Definition: ClipBase.h:80
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any). Useful for debugging.
Definition: Enums.h:65
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:250
int64_t GetLength()
Definition: KeyFrame.cpp:530
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:81
bool IsIncreasing(int index)
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:148
void SetJson(string value)
Load JSON string into this object.
Definition: Clip.cpp:788
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:251
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:58
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:114
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:121
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low...
Definition: ChunkReader.h:73
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:221
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:169
Fraction GetRepeatFraction(int64_t index)
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:420
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip's internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
double GetDelta(int64_t index)
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:467
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:239
string PropertiesJSON(int64_t requested_frame)
Definition: Clip.cpp:655
Clip()
Default Constructor.
Definition: Clip.cpp:134
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:70
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:58
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:59
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
std::map< string, string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:87
void Open()
Open the internal reader.
Definition: Clip.cpp:242
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%) ...
Definition: Enums.h:78
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
Definition: Clip.h:231
double GetValue(int64_t index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
void RemoveEffect(EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1000
AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:147
Exception for invalid JSON.
Definition: Exceptions.h:152
int64_t GetLong(int64_t index)
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:270
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:220
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:214
Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:243
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: Clip.h:103
VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:149
AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ClipBase.cpp:33
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:81
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:64
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:83
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:247
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:521
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:730
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:85
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:35
Anchor the clip to the canvas.
Definition: Enums.h:60
string Json()
Get and Set JSON methods.
Definition: Clip.cpp:648
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:83
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:82
Exception when too many seek attempts happen.
Definition: Exceptions.h:254
ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:232
virtual bool IsOpen()=0
Determine if reader is open or closed.
This class is used to resample audio data for many sequential frames.
Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:233