#include "../include/Timeline.h"

// ...

Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
        is_open(false), auto_map_clips(true), managed_cache(true)
// (destructor) Free any FrameMapper instances allocated by the timeline
std::set<FrameMapper *>::iterator it;
for (it = allocated_frame_mappers.begin(); it != allocated_frame_mappers.end(); ) {
    // ... (close and delete the mapper)
    it = allocated_frame_mappers.erase(it);
}

// Delete the cache only if the timeline still owns it
if (managed_cache && final_cache) {
// (AddClip) Apply the timeline's frame rate and sample rate, then track the clip
apply_mapper_to_clip(clip);
clips.push_back(clip);

// (AddEffect) Track the effect on the timeline
effects.push_back(effect);

// (RemoveEffect) Stop tracking the effect
effects.remove(effect);
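// [Editor's sketch] Minimal, hedged usage of the methods above; "video.mp4" is
// a hypothetical file, and the Clip path constructor / Position setter are
// assumed:
//     Timeline t(1920, 1080, Fraction(24, 1), 44100, 2, LAYOUT_STEREO);
//     Clip clip("video.mp4");
//     clip.Position(0.0);              // place at 0.0 s on the timeline
//     t.AddClip(&clip);                // auto-mapped to 24 fps / 44100 Hz on add
//     t.Open();
//     std::shared_ptr<Frame> first = t.GetFrame(1);
//     t.Close();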
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // ...
    // Reuse an existing FrameMapper, if the clip already has one
    if (clip->Reader()->Name() == "FrameMapper")
        // ...

    // Track the allocated FrameMapper so the destructor can free it later
    allocated_frame_mappers.insert(mapper);
    // ...

    // Point the clip at the (possibly wrapped) reader
    clip->Reader(clip_reader);
}
// (ApplyMapperToClips) Apply the timeline's frame rate and sample rate to all clips
for (auto clip : clips)
    apply_mapper_to_clip(clip);
double Timeline::calculate_time(int64_t number, Fraction rate)
{
    // Get the frame rate as a floating-point value
    double raw_fps = rate.ToFloat();

    // Frame 1 starts at 0.0 seconds
    return double(number - 1) / raw_fps;
}
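// [Editor's sketch] calculate_time maps a 1-based frame number to seconds.
// At 24 fps (rate = Fraction(24, 1)):
//     calculate_time(1, rate)  == (1 - 1)  / 24.0 == 0.0 s
//     calculate_time(25, rate) == (25 - 1) / 24.0 == 1.0 s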
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
    // Loop through each effect on the timeline
    for (auto effect : effects)
    {
        // Convert the effect's position and duration (seconds) into 1-based frame numbers
        long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
        long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

        // Does the effect overlap this frame, on this layer?
        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer);

        if (does_effect_intersect)
        {
            // Translate the timeline frame number into the effect's local frame number
            // ...
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect);

            // Apply the effect to this frame
            frame = effect->GetFrame(frame, effect_frame_number);
        }
    }

    // Return the (possibly modified) frame
    return frame;
}
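// [Editor's sketch] The position-to-frame arithmetic above, at 30 fps: an
// effect with Position() == 2.0 s and Duration() == 1.0 s covers
//     effect_start_position = round(2.0 * 30.0) + 1 = 61
//     effect_end_position   = round(3.0 * 30.0) + 1 = 91
// so timeline frames 61..91 (inclusive on both ends) intersect it on the
// effect's layer.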
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
    std::shared_ptr<Frame> new_frame;
    // ...

    // Attempt to get a frame from the clip's reader
    #pragma omp critical (T_GetOrCreateFrame)
    new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
    // ...
    #pragma omp critical (T_GetOrCreateFrame)
    // ... (fallback frame creation elided)
}

void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
    // Get the actual frame for this clip
    std::shared_ptr<Frame> source_frame;
    #pragma omp critical (T_addLayer)
    source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
    // Generate a waveform image for waveform (audio visualization) clips
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
    // ...

    std::shared_ptr<QImage> source_image;
    #pragma omp critical (T_addLayer)
    // ... (waveform image generation elided)
    source_frame->AddImage(std::shared_ptr<QImage>(source_image));
    // Apply timeline effects (once, on the top clip of this layer)
    if (is_top_clip && source_frame) {
        #pragma omp critical (T_addLayer)
        source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
    }

    // Declare the image used for compositing below
    std::shared_ptr<QImage> source_image;
    // Copy the clip's audio into the timeline frame
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
        // Loop through each audio channel of the source frame
        for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
        {
            // Volume on the previous frame, so gain can ramp smoothly between frames
            float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
            float volume = source_clip->volume.GetValue(clip_frame_number);
            // ...

            // VOLUME_MIX_AVERAGE: divide by the summed volume of all overlapping
            // clips, so the mixed layers never exceed 100%
            previous_volume = previous_volume / max_volume;
            volume = volume / max_volume;
            // ...

            // VOLUME_MIX_REDUCE: reduce each clip's volume by about 25% before
            // mixing (louder, but can pop if the sum exceeds 100%)
            previous_volume = previous_volume * 0.77;
            volume = volume * 0.77;
            // ...

            // Honor the clip's channel filter, if one is set (-1 = all channels)
            if (channel_filter != -1 && channel_filter != channel)
                // ... (skip this channel)

            // Nothing to mix when both volumes are zero
            if (previous_volume == 0.0 && volume == 0.0)
                // ... (skip this channel)

            // Default the output channel mapping to the source channel
            if (channel_mapping == -1)
                channel_mapping = channel;

            // Ramp the gain whenever the volume changes (or is not 100%)
            if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
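// [Editor's sketch] ApplyGainRamp interpolates from previous_volume to volume
// across the frame's samples (assumed linear), so per-frame volume keyframes
// change smoothly instead of stepping at frame boundaries. E.g. ramping from
// 0.5 to 1.0 over N samples:
//     gain(i) = 0.5 + 0.5 * (i / float(N - 1));   // i = 0 .. N-1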
            // Handle a sample-count mismatch between the two frames
            if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
            {
                #pragma omp critical (T_addLayer)
                // ...
            }

            // Copy the source samples into the timeline frame on the mapped channel
            #pragma omp critical (T_addLayer)
            new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
        // ...

        // Debug output (no audio copied: channel counts don't match)
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
    }
    // Skip the image compositing below when the clip has no waveform and no video
    if (// ...
        (!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
        // ... (return early)
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);

    // Get the clip's image for this frame
    source_image = source_frame->GetImage();
    // Get the clip's alpha (opacity) keyframe value for this frame
    float alpha = source_clip->alpha.GetValue(clip_frame_number);
    // ...

    // Scale the alpha channel of every RGBA pixel
    unsigned char *pixels = (unsigned char *) source_image->bits();

    for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
    {
        // Get the current alpha byte, then multiply it by the keyframe alpha
        int A = pixels[byte_index + 3];
        // ...
        pixels[byte_index + 3] *= alpha;
    }
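// [Editor's sketch] The loop above scales the alpha byte of each RGBA pixel:
// with a keyframe alpha of 0.5, a fully opaque pixel (A = 255) becomes 127 and
// a half-transparent one (A = 128) becomes 64. byte_index advances by 4
// because each pixel occupies 4 bytes (R, G, B, A).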
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number);
    // Determine the source image size, then apply the clip's scale mode
    QSize source_size = source_image->size();

    switch (source_clip->scale)
    {
        case (SCALE_FIT): {
            // Scale until either dimension fits the canvas, preserving aspect ratio
            // ...
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            // Distort the clip so both dimensions exactly fill the canvas
            // ...
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            // Scale until both dimensions fill the canvas (cropping the overlap):
            // keep whichever candidate size is larger
            // ...
            source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
            // ...
            source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Size the source relative to the canvas dimensions
            float source_width_ratio = source_size.width() / float(info.width);
            float source_height_ratio = source_size.height() / float(info.height);
            // ...

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
    }
    // Final size of the source on the canvas, after the scale keyframes (sx, sy)
    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);
    // Track whether any transform is needed, and build it up operation by operation
    bool transformed = false;
    QTransform transform;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
    // Rotate the source around its scaled center point
    if (!isEqual(r, 0)) {
        float origin_x = x + (scaled_source_width / 2.0);
        float origin_y = y + (scaled_source_height / 2.0);
        transform.translate(origin_x, origin_y);
        // ... (rotation elided)
        transform.translate(-origin_x, -origin_y);
    }

    // Move the source to the clip's x/y position
    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        transform.translate(x, y);
    }

    // Scale from the raw image size to the computed source size
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;

    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
    }

    // Shear the source (if needed)
    if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
        transform.shear(shear_x, shear_y);
    }
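// [Editor's sketch] QTransform applies operations in the order they are added,
// so the translate / rotate / translate trio above spins the image around its
// own center rather than the canvas origin. For a 100x100 source centered at
// (50, 50):
//     QTransform t;
//     t.translate(50, 50);    // move the origin to the image center
//     t.rotate(90);           // the elided step between the two translates
//     t.translate(-50, -50);  // move the origin back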
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);

    // Get the timeline frame's image (the compositing target)
    std::shared_ptr<QImage> new_image;
    #pragma omp critical (T_addLayer)
    new_image = new_frame->GetImage();
    // Composite the transformed source image onto the timeline frame
    QPainter painter(new_image.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
    // ...

    painter.setTransform(transform);
    // ...

    // Draw the source, honoring the crop keyframes (fractions of the image size)
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image, crop_x * source_image->width(), crop_y * source_image->height(), crop_w * source_image->width(), crop_h * source_image->height());
    // Optionally stamp a frame number onto the image, depending on the clip's
    // display setting (clip frame number, timeline frame number, or both)
    std::stringstream frame_number_str;
    // ...
    frame_number_str << clip_frame_number;
    // ...
    frame_number_str << timeline_frame_number;
    // ...
    frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
    // ...

    painter.setPen(QColor("#ffffff"));
    painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);
}
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());

    // Is this clip already tracked as open?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Close the clip and stop tracking it
        // ...
        open_clips.erase(clip);
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Open the clip and start tracking it
        // ...
        open_clips[clip] = clip;
    }
    // ...

    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());
}
// Sort the timeline's clips and effects
void Timeline::sort_clips()
// ...

void Timeline::sort_effects()
// ...

// (Timeline::Close) Mark every clip as no longer intersecting, closing its reader
for (auto clip : clips)
{
    // ...
    update_open_clips(clip, false);
}
// ...

// Clear all cached frames
final_cache->Clear();
// Compare two doubles with a small epsilon tolerance
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
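// [Editor's sketch] Why an epsilon instead of ==: keyframe math is done in
// floating point, where exact equality is unreliable. For example,
//     0.1 + 0.2 == 0.3            // false (the sum is 0.30000000000000004)
//     isEqual(0.1 + 0.2, 0.3)     // true (difference ~6e-17, far below 1e-6)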
// (GetFrame) Adjust out-of-range requests to the first frame
if (requested_frame < 1)
    requested_frame = 1;

// Check the cache first
std::shared_ptr<Frame> frame;
#pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
// ...

// The timeline must be open before frames can be requested
throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");
// ...

// Check the cache again (a parallel section may have already generated this frame)
#pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
    // Find all clips near the requested range (opening/closing readers as needed)
    std::vector<Clip*> nearby_clips;
    #pragma omp critical (T_GetFrame)
    nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
    // ...

    // Allow nested OpenMP parallel sections
    omp_set_nested(true);
    // First pass over the requested range: request each intersecting clip's frame
    for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
    {
        // ...
        for (auto clip : nearby_clips)
        {
            // ...
            bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
            if (does_clip_intersect)
            {
                // Translate the timeline frame number into the clip's local frame number
                long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
                // ...
            }
        }
    }
    // Second pass: generate each timeline frame, in order
    #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static, 1)
    for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
    {
        // ...
        // Start the frame with silence; clip audio is mixed in below
        #pragma omp critical (T_GetFrame)
        // ...
        new_frame->AddAudioSilence(samples_in_frame);
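// [Editor's sketch] The silence added above needs the correct per-frame sample
// count, which the documented helper GetSamplesPerFrame(fps, sample_rate,
// channels) computes. At 24 fps and 44100 Hz, 44100 / 24 = 1837.5 samples per
// frame, so frame lengths must vary (e.g. alternating 1837 and 1838 samples)
// for audio and video to stay in sync over time.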
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());

        // Composite every intersecting clip onto this timeline frame
        for (auto clip : nearby_clips)
        {
            // ...
            bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
            // ...

            if (does_clip_intersect)
            {
                // Determine whether this clip is the top-most on its layer, and the
                // combined volume of all overlapping clips (for audio mixing)
                bool is_top_clip = true;
                float max_volume = 0.0;
                for (auto nearby_clip : nearby_clips)
                {
                    long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
                    long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
                    long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
                    long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

                    // A clip that starts later on the same layer covers this one
                    if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                        nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
                        nearby_clip_start_position > clip_start_position && is_top_clip) {
                        // ... (is_top_clip = false)
                    }

                    // Sum the volume of every overlapping clip with audible audio
                    if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
                        nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
                        nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
                        max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
                    }
                }

                // Translate the timeline frame number into the clip's local frame number
                long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
                // ...

                // Composite this clip's image and audio onto the timeline frame
                add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
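// [Editor's sketch] max_volume sums the volume of every audible clip that
// overlaps this frame. With two overlapping clips at volumes 0.8 and 0.6,
// max_volume == 1.4; under the "average" mixing strategy each clip is then
// divided by 1.4 (becoming ~0.57 and ~0.43), so the mix totals 100% and
// cannot clip.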
            }
            // ...
        }

        // Stamp the timeline frame number and cache the finished frame
        new_frame->SetFrameNumber(frame_number);
        // ...
        final_cache->Add(new_frame);
    }
    // ...

    // Return the requested frame (now in the cache)
    return final_cache->GetFrame(requested_frame);
}
std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
    std::vector<Clip*> matching_clips;

    // The frame range to test against
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);
    // ...

    for (auto clip : clips)
    {
        // ...
        // Does the clip overlap the requested range?
        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);

        // Open or close the clip's reader to match its intersection state
        #pragma omp critical (reader_lock)
        update_open_clips(clip, does_clip_intersect);

        // Collect either the intersecting or the non-intersecting clips
        if (does_clip_intersect && include)
            matching_clips.push_back(clip);
        else if (!does_clip_intersect && !include)
            matching_clips.push_back(clip);
    }

    return matching_clips;
}
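// [Editor's sketch] GetFrame calls this with include == true to gather the
// clips overlapping the next batch of frames, e.g.:
//     // clips touching timeline frames 100..119
//     std::vector<Clip*> near = find_intersecting_clips(100, 20, true);
// Passing include == false returns the complement: the clips that do NOT
// overlap the requested range.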
// (SetCache) Replace the timeline's cache with a caller-provided one
if (managed_cache && final_cache) {
    // Free the cache the timeline previously owned
    // ...
    managed_cache = false;
}

// ...
final_cache = new_cache;
// (JsonValue) Serialize this timeline (and its children) to a Json::Value
root["type"] = "Timeline";
// ...

// Serialize all clips
root["clips"] = Json::Value(Json::arrayValue);
for (const auto existing_clip : clips)
    root["clips"].append(existing_clip->JsonValue());

// Serialize all effects
root["effects"] = Json::Value(Json::arrayValue);
for (const auto existing_effect : effects)
    root["effects"].append(existing_effect->JsonValue());
catch (const std::exception& e)
{
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
}
// (SetJsonValue) Remember whether the timeline was open, then rebuild it from JSON
bool was_open = is_open;
// ...

if (!root["clips"].isNull()) {
    // ...
    // Create each clip from its JSON and add it to the timeline
    for (const Json::Value existing_clip : root["clips"]) {
        // ...
    }
}
// ...

if (!root["effects"].isNull()) {
    // ...
    for (const Json::Value existing_effect : root["effects"]) {
        // ...
        // Create each effect from its registered type name
        if (!existing_effect["type"].isNull()) {
            if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
                // ...
            }
        }
    }
}

if (!root["duration"].isNull()) {
    // ...
}
// (ApplyJsonDiff) Route each change object to the clips, effects, or timeline handler
for (const Json::Value change : root) {
    // The first element of the "key" path selects the handler
    std::string change_key = change["key"][(uint)0].asString();

    if (change_key == "clips")
        apply_json_to_clips(change);
    else if (change_key == "effects")
        apply_json_to_effects(change);
    else
        apply_json_to_timeline(change);
}
// ...
catch (const std::exception& e)
{
    throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
}
void Timeline::apply_json_to_clips(Json::Value change) {
    // Get the change type and the id of the targeted clip
    std::string change_type = change["type"].asString();
    std::string clip_id = "";
    Clip *existing_clip = NULL;

    // Search the "key" path for an object carrying the clip id
    for (auto key_part : change["key"]) {
        if (key_part.isObject()) {
            if (!key_part["id"].isNull()) {
                clip_id = key_part["id"].asString();

                // Find the matching clip on the timeline (if any)
                for (auto c : clips)
                {
                    if (c->Id() == clip_id) {
                        // ...
                    }
                }
            }
        }
    }
    // A 4-part key path like ["clips", {id}, "effects", {id}] targets an effect on a clip
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            if (!key_part["id"].isNull())
            {
                std::string effect_id = key_part["id"].asString();

                // Find the matching effect on this clip
                std::list<EffectBase*> effect_list = existing_clip->Effects();
                for (auto e : effect_list)
                {
                    if (e->Id() == effect_id) {
                        // Apply the change to the clip's effect
                        apply_json_to_effects(change, e);

                        // Invalidate the affected region of the cache (with padding)
                        // ...
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
                    }
                }
            }
        }
    }
    // When the change carries a new position, invalidate that region of the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }
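// [Editor's sketch] The invalidation arithmetic above, at 30 fps: a clip moved
// to position 2.0 s with start 0.0 s and end 4.0 s covers
//     new_starting_frame = (2.0 * 30) + 1 = 61
//     new_ending_frame   = ((2.0 + 4.0 - 0.0) * 30) + 1 = 181
// and Remove(61 - 8, 181 + 8) drops cached frames 53..189, padding the range
// by 8 frames on each side.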
    if (change_type == "insert") {
        // Create a new clip from the JSON and add it to the timeline
        // ...
        apply_mapper_to_clip(clip);
    } else if (change_type == "update") {
        if (existing_clip) {
            // Invalidate the clip's old region of the cache
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Also clear the clip reader's own cache for that region
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update the clip from the JSON, then re-apply the frame mapper
            // ...
            apply_mapper_to_clip(existing_clip);
        }
    } else if (change_type == "delete") {
        if (existing_clip) {
            // Invalidate the deleted clip's region of the cache, then remove the clip
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
        }
    }
void Timeline::apply_json_to_effects(Json::Value change) {
    // Get the change type
    std::string change_type = change["type"].asString();
    // ...

    // Search the "key" path for the id of the targeted effect
    for (auto key_part : change["key"]) {
        if (key_part.isObject()) {
            if (!key_part["id"].isNull())
            {
                std::string effect_id = key_part["id"].asString();

                // Find the matching effect on the timeline (if any)
                for (auto e : effects)
                {
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        // ...
                    }
                }
            }
        }
    }

    // Inserts have no existing effect yet; everything else needs a match
    if (existing_effect || change_type == "insert")
        apply_json_to_effects(change, existing_effect);
}
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {
    // Get the change type
    std::string change_type = change["type"].asString();
    // ...

    // When the change carries a new position, invalidate that region of the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }
    if (change_type == "insert") {
        // Create a new effect of the requested type
        std::string effect_type = change["value"]["type"].asString();
        // ...
        if ( (e = EffectInfo().CreateEffect(effect_type)) ) {
            // ...
        }
    } else if (change_type == "update") {
        if (existing_effect) {
            // Invalidate the effect's old region of the cache, then update it
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
        }
    } else if (change_type == "delete") {
        if (existing_effect) {
            // Invalidate the effect's old region of the cache, then remove it
            // ...
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
        }
    }
void Timeline::apply_json_to_timeline(Json::Value change) {
    // Get the change type and the property key (plus an optional sub-key, e.g. "fps" / "num")
    std::string change_type = change["type"].asString();
    std::string root_key = change["key"][(uint)0].asString();
    std::string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Any timeline-level change invalidates the entire cache
    final_cache->Clear();

    if (change_type == "insert" || change_type == "update") {
        // Route the change to the matching timeline property
        if (root_key == "color")
            // ...
        else if (root_key == "viewport_scale")
            // ...
        else if (root_key == "viewport_x")
            // ...
        else if (root_key == "viewport_y")
            // ...
        else if (root_key == "duration") {
            // ...
        }
        else if (root_key == "width")
            // ...
        else if (root_key == "height")
            // ...
        else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
            // The whole fraction was supplied, e.g. {"num": 30, "den": 1}
            if (!change["value"]["num"].isNull())
                info.fps.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.fps.den = change["value"]["den"].asInt();
        }
        else if (root_key == "fps" && sub_key == "num")
            // ...
        else if (root_key == "fps" && sub_key == "den")
            // ...
        else if (root_key == "display_ratio" && sub_key == "" && change["value"].isObject()) {
            if (!change["value"]["num"].isNull())
                // ...
            if (!change["value"]["den"].isNull())
                // ...
        }
        else if (root_key == "display_ratio" && sub_key == "num")
            // ...
        else if (root_key == "display_ratio" && sub_key == "den")
            // ...
        else if (root_key == "pixel_ratio" && sub_key == "" && change["value"].isObject()) {
            if (!change["value"]["num"].isNull())
                // ...
            if (!change["value"]["den"].isNull())
                // ...
        }
        else if (root_key == "pixel_ratio" && sub_key == "num")
            // ...
        else if (root_key == "pixel_ratio" && sub_key == "den")
            // ...
        else if (root_key == "sample_rate")
            // ...
        else if (root_key == "channels")
            // ...
        else if (root_key == "channel_layout")
            // ...
        else
            // Unknown property: reject the change
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    } else if (change["type"].asString() == "delete") {

        // Only a few timeline properties can be reset by a delete
        if (root_key == "color") {
            // ...
        }
        else if (root_key == "viewport_scale")
            // ...
        else if (root_key == "viewport_x")
            // ...
        else if (root_key == "viewport_y")
            // ...
        else
            // Unknown property: reject the change
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
// (ClearAllCache) Clear the timeline cache plus every clip's reader cache
final_cache->Clear();

// Loop through all clips
for (auto clip : clips)
{
    // Clear the clip reader's cache
    clip->Reader()->GetCache()->Clear();

    // Also clear the underlying reader wrapped by any FrameMapper
    if (clip->Reader()->Name() == "FrameMapper") {
        // ...
    }
}

// (SetMaxSize) Scale the proposed maximum size to the timeline's aspect ratio
display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
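// [Editor's sketch] QSize::scale with Qt::KeepAspectRatio fits one size inside
// another while preserving its ratio. For a 16:9 timeline limited to 1000x1000:
//     QSize display_ratio_size(16, 9);
//     QSize proposed_size(1000, 1000);
//     display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
//     // display_ratio_size is now 1000x562 (9/16 of 1000, truncated)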