From e091865960104780aa7701ba7122bbbbc1276198 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Mon, 9 Jun 2025 11:04:04 -0700 Subject: [PATCH 01/30] added otiodiff script Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 653 ++++++++++++++++++ 1 file changed, 653 insertions(+) create mode 100644 src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py new file mode 100644 index 000000000..a22fa8883 --- /dev/null +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -0,0 +1,653 @@ +import argparse +import os +import copy +from collections import namedtuple + +import opentimelineio as otio + +from .clipData import ClipData +from . import makeOtio + +# set otio version to 0.17 +os.environ["OTIO_DEFAULT_TARGET_VERSION_FAMILY_LABEL"] = "OTIO_CORE:0.17.0" + +def main(fileA, fileB): + + # parser = argparse.ArgumentParser(description="compare two .otio files with flattened video tracks (one video track only)") + # parser.add_argument("fileA", metavar="fileA", type=str, help="file path to otio file") + # parser.add_argument("fileB", metavar="fileB", type=str, help="file path to otio file") + # parser.add_argument("--display", metavar="display", type=str, help="Specify how the new otio file displays info. Options: 'stack', 'inline', or 'full'") + # parser.add_argument("--flatten", action='store_true', help="Toggle to flatten input files") + + # args = parser.parse_args() + + # assert(fileA[-5:] == ".otio"), "File A is not an otio file" + # assert(fileB[-5:] == ".otio"), "File B is not an otio file" + + # tlA = otio.adapters.read_from_file(fileA) + # tlB = otio.adapters.read_from_file(fileB) + tlA = fileA + tlB = fileB + + # old implmentation + # videoTl = processSingleTrack(tlA, tlB) + + # new implementation that can process inputs with multiple tracks + # displayMode = None + + # if args.display is not None: + # displayMode = args.display.lower() + # displaySettings = ("inline", "stack", "full", "simple") + # if displayMode not in displaySettings: + # print("Not a recognized display mode, defaulting to 'simple'.") + # displayMode = "simple" + + + # videoTl = processAllTracks(tlA, tlB, "video", displayMode) + videoTl = processAllTracksAB(tlA, tlB) + + # # audio only + # audioTl = processAllTracks(tlA, tlB, "audio") + + # # both + # allTl = processAllTracks(tlA, tlB, "all") + # setDisplay(args.display.lower()) + + origClipCount = 0 + for t in tlA.video_tracks(): + origClipCount += len(t.find_clips()) + + for t in tlB.video_tracks(): + origClipCount += len(t.find_clips()) + + print(origClipCount) + print(len(videoTl.find_clips())) + # assert(len(tlA.find_clips()) + len(tlB.find_clips()) == len(videoTl.find_clips())), "Clip count doesn't match across two timelines" + + # commented out display for now + return videoTl + +def toOtio(data, path): + otio.adapters.write_to_file(data, path) + +# for debugging, put response into file +def toJson(file): + with open("clipDebug.json", "w") as f: + f.write(file) + +def toTxt(file): + with open("report.txt", "w") as f: + f.write(file) + +# create a dictionary with all the cloned clips (ones that share the same truncated name) +# key is the truncated name, value is a list of ClipDatas +# @parameter clips, list of ClipDatas +def findClones(clips): + clones = {} + nonClones = [] + names = [] + + for c in clips: + names.append(c.name) + + for c in clips: + if c.name in 
clones:
+            clones[c.name].append(c)
+        elif names.count(c.name) > 1:
+            clones[c.name] = [c]
+        else:
+            nonClones.append(c)
+
+    return clones, nonClones
+
+def sortClones(clipDatasA, clipDatasB):
+    # find cloned clips and separate them out from the unique clips
+    clonesA, nonClonesA = findClones(clipDatasA)
+    clonesB, nonClonesB = findClones(clipDatasB)
+
+    # move clips that are clones in the other file into the clones dictionary,
+    # leaving strictly unique clips in nonClones
+    # iterate over copies so that remove() doesn't skip elements mid-iteration
+    for c in list(nonClonesA):
+        if c.name in clonesB.keys():
+            clonesA[c.name] = [c]
+            nonClonesA.remove(c)
+            # print("clone in B file: ", c.name)
+    for c in list(nonClonesB):
+        if c.name in clonesA.keys():
+            clonesB[c.name] = [c]
+            nonClonesB.remove(c)
+            # print("clone in A file: ", c.name)
+
+    return (clonesA, nonClonesA), (clonesB, nonClonesB)
+
+# compare all clips that had a clone
+def compareClones(clonesA, clonesB):
+    added = []
+    unchanged = []
+    deleted = []
+
+    for nameB in clonesB:
+        # if there are no clips in timeline A with the same name
+        # as cloneB, all of the clones of cloneB are new and added
+        # print("name b: ", nameB)
+        if nameB not in clonesA:
+            added.extend(clonesB[nameB])
+
+        # name matched: clones exist in both A and B, so check whether any are the same clips
+        # technically the first mismatch could be "edited" and the rest "added"/"deleted" -> depends on how we define it
+        # currently, all clones that aren't exactly the same get categorized as either "added" or "deleted"
+        else:
+            clipsA = clonesA[nameB]
+            clipsB = clonesB[nameB]
+
+            for clipB in clipsB:
+                for clipA in clipsA:
+                    isSame = clipB.checkSame(clipA)
+                    if(isSame):
+                        unchanged.append(clipB)
+                    else:
+                        if(clipB not in added):
+                            added.append(clipB)
+                        if(clipA not in deleted):
+                            deleted.append(clipA)
+
+    # same as above for deleted clips
+    for nameA in clonesA:
+        if nameA not in clonesB:
+            deleted.extend(clonesA[nameA])
+
+    # print("from clones added: ", len(added), " deleted: ", len(deleted))
+
+    return added, unchanged, deleted
+
+# compare all strictly unique clips
+def compareClips(clipDatasA, clipDatasB):
+    namesA = {}
+    namesB = {}
+
+    added = []
+    edited = []
+    unchanged = []
+    deleted = []
+
+    for c in clipDatasA:
+        namesA[c.name] = c
+    for c in clipDatasB:
+        namesB[c.name] = c
+
+    for cB in clipDatasB:
+        if cB.name not in namesA:
+            added.append(cB)
+        else:
+            isSame = cB.checkSame(namesA[cB.name])
+            if(isSame):
+                unchanged.append(cB)
+            else:
+                isEdited = cB.checkEdited(namesA[cB.name])
+                if(isEdited):
+                    cB.pair = namesA[cB.name]
+                    edited.append(cB)
+                else:
+                    print("======== not categorized ==========")
+                    cA = namesA[cB.name]
+                    print("Clips: ", cA.name, cB.name)
+                    # cA.printData()
+                    # cB.printData()
+
+    for cA in clipDatasA:
+        if cA.name not in namesB:
+            deleted.append(cA)
+
+    return added, edited, unchanged, deleted
+
+def processVideo(videoTrackA, videoTrackB):
+    clipDatasA = []
+    clipDatasB = []
+
+    for c in videoTrackA.find_clips():
+        take = None
+        if(len(c.name.split(" ")) > 1):
+            take = c.name.split(" ")[1]
+        else:
+            take = None
+        cd = ClipData(c.name.split(" ")[0],
+                      c.media_reference,
+                      c.source_range,
+                      c.trimmed_range_in_parent(),
+                      c,
+                      take)
+        clipDatasA.append(cd)
+
+    for c in videoTrackB.find_clips():
+        take = None
+        if(len(c.name.split(" ")) > 1):
+            take = c.name.split(" ")[1]
+        else:
+            take = None
+        cd = ClipData(c.name.split(" ")[0],
+                      c.media_reference,
+                      c.source_range,
c.trimmed_range_in_parent(), + c, + take) + clipDatasB.append(cd) + + (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) + + # compare clips and put into categories + addV = [] + editV = [] + sameV = [] + deleteV = [] + + # compare and categorize unique clips + addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB) + + # compare and categorize cloned clips + addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB) + addV.extend(addCloneV) + sameV.extend(sameCloneV) + deleteV.extend(deleteCloneV) + + return addV, editV, sameV, deleteV + +def processAudio(audioTrackA, audioTrackB): + addA = [] + editA = [] + sameA = [] + deleteA = [] + + audioClipDatasA = [] + audioClipDatasB = [] + + for c in audioTrackA.find_clips(): + cd = ClipData(c.name, + c.media_reference, + c.source_range, + c.trimmed_range_in_parent(), + c) + audioClipDatasA.append(cd) + + for c in audioTrackB.find_clips(): + cd = ClipData(c.name, + c.media_reference, + c.source_range, + c.trimmed_range_in_parent(), + c) + audioClipDatasB.append(cd) + + addA, editA, sameA, deleteA = compareClips(audioClipDatasA, audioClipDatasB) + + return addA, editA, sameA, deleteA + +# clip is an otio Clip +def getTake(clip): + take = None + if(len(clip.name.split(" ")) > 1): + take = clip.name.split(" ")[1] + else: + take = None + return take + +# the consolidated version of processVideo and processAudio, meant to replace both +def compareTracks(trackA, trackB): + clipDatasA = [] + clipDatasB = [] + + for c in trackA.find_clips(): + # put clip info into ClipData + cd = ClipData(c.name.split(" ")[0], + c.media_reference, + c.source_range, + c.trimmed_range_in_parent(), + c, + getTake(c)) + clipDatasA.append(cd) + + for c in trackB.find_clips(): + # put clip info into ClipData + cd = ClipData(c.name.split(" ")[0], + c.media_reference, + c.source_range, + c.trimmed_range_in_parent(), + c, + getTake(c)) + clipDatasB.append(cd) + + (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) + + # compare clips and put into categories + addV = [] + editV = [] + sameV = [] + deleteV = [] + + # compare and categorize unique clips + addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB) + + # compare and categorize cloned clips + addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB) + addV.extend(addCloneV) + sameV.extend(sameCloneV) + deleteV.extend(deleteCloneV) + + SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) + videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + + makeSummary(trackA, trackB, videoGroup) + + # return addV, editV, sameV, deleteV + return videoGroup + +# ============================= NEW FOR MULTITRACK ============================= +def processAllTracks(tlA, tlB, trackType, displayMode): + # determine which track set is shorter + assert(trackType is not None), "Missing type of track in function call" + # TODO add check that timeline track length is not 0 + + tracksA = None + tracksB = None + newTl = otio.schema.Timeline(name="timeline") + tempB = otio.schema.Timeline(name="timeline") + + if(trackType.lower() == "video"): + tracksA = tlA.video_tracks() + tracksB = tlB.video_tracks() + elif(trackType.lower() == "audio"): + tracksA = tlA.audio_tracks() + tracksB = tlB.audio_tracks() + elif(trackType.lower() == "all"): + print("show both video and audio") + + shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB + print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + 
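+    # a hypothetical example of the pairing below: with two video tracks in A
+    # and three in B, the loop diffs the index-matched pairs (A1, B1) and
+    # (A2, B2), and the unpartnered track B3 is later marked as an added track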
+ # Process Matched Video Tracks + # index through all the video tracks of the timeline with less tracks + tracksOfDels = [] + for i in range(0, len(shorterTlTracks)): + currTrackA = tracksA[i] + currTrackB = tracksB[i] + + videoGroup = compareTracks(currTrackA, currTrackB) + + # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + + # add processed tracks to display timeline + getTl = None + if displayMode is None: + print("Warning: Display mode not specified, defaulting to inline") + getTl = makeOtio.makeTimelineOfType("simple", currTrackA, currTrackB, videoGroup) + else: + # getTl = makeOtio.makeTimelineOfType(displayMode, currTrackA, currTrackB, videoGroup) + + # split delete out + getTl, tDelV = makeOtio.makeTimelineSplitDelete(currTrackA, currTrackB, videoGroup) + tracksOfDels.insert(0, tDelV) + + for t in getTl.tracks: + newTl.tracks.append(copy.deepcopy(t)) + print("current track stack size:", len(newTl.tracks)) + + # Process Unmatched Video Tracks + # mark unmatched tracks as either "added" or "deleted" and add to display timeline + if shorterTlTracks == tracksA: + # tlA is shorter so tlB has added tracks + for i in range(len(shorterTlTracks), len(tracksB)): + newTrack = tracksB[i] + for c in newTrack.find_clips(): + c = makeOtio.addRavenColor(c, "GREEN") + + # add to top of track stack + newTl.tracks.append(copy.deepcopy(newTrack)) + print("added unmatched track", len(newTl.tracks)) + else: + for i in range(len(shorterTlTracks), len(tracksA)): + # color clips + newTrack = tracksA[i] + for c in newTrack.find_clips(): + c = makeOtio.addRavenColor(c, "PINK") + + # add to bottom of track stack + # newTl.tracks.append(copy.deepcopy(newTrack)) + + # split delete out + # tracksOfDels.insert(0, newTrack) + + print("added unmatched track", len(newTl.tracks)) + + makeOtio.makeDeletes(newTl, tracksOfDels) + + return newTl + + +# maybe just loop through all of the tracks in A and then all of the tracks in B?? 
+# see if can simplify organization +def processAllTracksAB(tlA, tlB): + # determine which track set is shorter + # TODO add check that timeline track length is not 0 + + tracksA = tlA.video_tracks() + tracksB = tlB.video_tracks() + newTl = otio.schema.Timeline(name="timeline") + displayA = [] + displayB = [] + + + shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB + print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + + # Process Matched Video Tracks + # index through all the video tracks of the timeline with less tracks + for i in range(0, len(shorterTlTracks)): + currTrackA = tracksA[i] + currTrackB = tracksB[i] + + videoGroup = compareTracks(currTrackA, currTrackB) + + trackNum = i + 1 + newA = makeOtio.makeTrackA(videoGroup, trackNum) + displayA.append(newA) + + newB = makeOtio.makeTrackB(videoGroup, trackNum) + displayB.append(newB) + + if shorterTlTracks == tracksA: + # tlA is shorter so tlB has added tracks + for i in range(len(shorterTlTracks), len(tracksB)): + newTrack = tracksB[i] + for c in newTrack.find_clips(): + # c = makeOtio.addRavenColor(c, "GREEN") + newMarker = makeOtio.addMarker(c, "GREEN") + c.markers.append(newMarker) + + # add to top of track stack + displayB.append(copy.deepcopy(newTrack)) + # print("added unmatched track", len(newTl.tracks)) + else: + for i in range(len(shorterTlTracks), len(tracksA)): + # color clips + newTrack = tracksA[i] + for c in newTrack.find_clips(): + # c = makeOtio.addRavenColor(c, "PINK") + newMarker = makeOtio.addMarker(c, "PINK") + c.markers.append(newMarker) + displayA.append(copy.deepcopy(newTrack)) + + newTl.tracks.extend(displayA) + + newEmpty = makeOtio.makeEmptyTrack() + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayB) + + return newTl + # ================================================================================= + +# TODO: organize the current terminal print-out into a document/txt file +def makeSummary(tlA, tlB, videoGroup): + print("===================================") + print(" Overview Summary ") + print("===================================") + + # compare overall file duration + # if(tlB.duration() > tlA.duration()): + # delta = tlB.duration().to_seconds() - tlA.duration().to_seconds() + # print(f"timeline duration increased by {delta:.2f} seconds") + # elif(tlB.duration() < tlA.duration()): + # delta = tlA.duration().to_seconds() - tlB.duration().to_seconds() + # print(f"timeline duration decreased by {delta:.2f} seconds") + # print("") + + # print("======= Cloned Video Clips =======") + # print("Otio A:") + # for k in clonesA.keys(): + # print(k, ":", len(clonesA[k])) + # print("") + # print("Otio B:") + # for k in clonesB.keys(): + # print(k, ":", len(clonesB[k])) + + + print("======= Video Clip Info Overview =======") + print("added: ", len(videoGroup.add)) + for c in videoGroup.add: + print(c.name) + print("=======") + + print("edited: ", len(videoGroup.edit)) + for c in videoGroup.edit: + print(c.name) + print("=======") + + print("same: ", len(videoGroup.same)) + # for c in sameV: + # print(c.name) + # if(c["label"] == "moved"): + # print(c["name"], " ", c["label"]) + print("=======") + + print("deleted: ", len(videoGroup.delete)) + for c in videoGroup.delete: + print(c.name) + print("=======") + + +# TODO: add a flatten flag +def processSingleTrack(tlA, tlB): + assert len(tlA.video_tracks()) == 1, "File A contains more than 1 video track. Please flatten to a single track." + assert len(tlB.video_tracks()) == 1, "File B contains more than 1 video track. 
Please flatten to a single track." + + videoTrackA = tlA.video_tracks()[0] + videoTrackB = tlB.video_tracks()[0] + + # check for nested video tracks and stacks + assert(not videoTrackA.find_children(otio._otio.Track)), "File A contains nested track(s). Please flatten to a single track." + # assert(not videoTrackA.find_children(otio._otio.Stack)), "File A contains nested stack(s). Please flatten to a single track." + assert(not videoTrackB.find_children(otio._otio.Track)), "File B contains nested track(s). Please flatten to a single track." + # assert(not videoTrackB.find_children(otio._otio.Stack)), "File B contains nested stack(s). Please flatten to a single track." + + + # ====== VIDEO TRACK PROCESSING ====== + addV, editV, sameV, deleteV = processVideo(videoTrackA, videoTrackB) + + # ====== AUDIO TRACK PROCESSING ====== + # check if audio tracks exist + hasAudio = False + + if(len(tlA.audio_tracks()) != 0): + assert len(tlA.audio_tracks()) == 1, "File A contains more than 1 audio track" + hasAudio = True + if(len(tlB.audio_tracks()) != 0): + assert len(tlB.audio_tracks()) == 1, "File B contains more than 1 audio track" + hasAudio = True + + # if audio track(s) present, compare audio track(s) + if(hasAudio): + audioTrackA = tlA.audio_tracks()[0] + audioTrackB = tlB.audio_tracks()[0] + + addA, editA, sameA, deleteA = processAudio(audioTrackA, audioTrackB) + + # ====== MAKE NEW OTIO ====== + SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) + videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + + # check which display mode is toggled + if(args.display is None): + print("no display mode specified, defaulting to inline") + flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) + toOtio(flatTl) + + # multi-track output + elif(args.display.lower() == "stack"): + print("display mode: stack") + if(hasAudio): + audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) + stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup, audioGroup) + else: + stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup) + toOtio(stackTl) + + # single-track output + elif(args.display.lower() == "inline"): + print("display mode: inline") + if(hasAudio): + audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) + flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup, audioGroup) + + # flat track output + else: + flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) + toOtio(flatTl) + + # both multi and single track output + elif(args.display.lower() == "full"): + print("display mode: full") + if(hasAudio): + audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) + fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup, audioGroup) + else: + fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup) + toOtio(fullTl) + + else: + print("not an accepted display mode, no otios made") + +if __name__ == "__main__": + main() + +''' ======= Notes ======= + maybe can make use of algorithms.filter.filter_composition + +# a test using python difflib, prob not useful + # # find deltas of 2 files and print into html site + # d = HtmlDiff(wrapcolumn=100) + # diff = d.make_file(file1.splitlines(), file2.splitlines(), context=True) + # with open("diff.html", "w", encoding="utf-8") as f: + # f.write(diff) + + # s = SequenceMatcher(None, file1, file2) + # print(s.quick_ratio()) + + # each one in new check with each one in old + # if everything matches, unchanged <- 
can't just check against the first instance, because one might have been added before it
+    # if everything matches except for timeline position -> moved
+    # if length doesn't match, look for ordering? or just classify as added/deleted
+    # if the clip counts in old and new differ, then clips were definitely added/deleted
+
+
+    Test shot simple:
+    python ./src/getDif.py /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio
+
+    Test seq matching edit's skywalker:
+    python ./src/getDif.py /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_ChangeNotes.Relinked.01.otio /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio
+
+    Test shot multitrack:
+    python ./src/getDif.py /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio
+'''
\ No newline at end of file

From 619ca8b18dd9b837a49c7b547d36b592ef28a0a6 Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Tue, 29 Jul 2025 17:51:34 -0700
Subject: [PATCH 02/30] added command line arg and function framework for
 otiodiff

Signed-off-by: Yingjie Wang
---
 .../opentimelineio/console/otiotool.py        | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py
index d6e055271..7c47df21d 100755
--- a/src/py-opentimelineio/opentimelineio/console/otiotool.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py
@@ -112,6 +112,12 @@ def main():
     for timeline in timelines:
         copy_media_to_folder(timeline, args.copy_media_to_folder)
 
+    # ===== NEW Phase 5.5: Diff otio files ======
+
+    if args.diff:
+        print("got diff from args")
+        diff_otio()
+
     # Phase 6: Remove/Redaction
 
     if args.remove_metadata_key:
@@ -443,6 +449,16 @@ def parse_arguments():
         are supported.
Use '-' to write OTIO to standard output.""" ) + # NEW ============== + parser.add_argument( + "--diff", + "-d", + action="store_true", + help="""Diff and compare two otio files""" + ) + + # ================== + args = parser.parse_args() # At least one of these must be specified @@ -480,6 +496,12 @@ def read_inputs(input_paths): timelines.append(timeline) return timelines +# ======= NEW ======= + +def diff_otio(): + print("hello world from diff otio") + +# =================== def keep_only_video_tracks(timeline): """Remove all tracks except for video tracks from a timeline.""" From ab4ecacd116f8def30d5ed053c63c07a4e5c40d9 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Wed, 6 Aug 2025 10:28:46 -0700 Subject: [PATCH 03/30] ported otiodiff into console scripts and added otiodiff as an option in otiotool Signed-off-by: Yingjie Wang --- .../console/otiodiff/__init__.py | 0 .../console/otiodiff/clipData.py | 123 +++++++ .../console/otiodiff/makeOtio.py | 341 ++++++++++++++++++ .../opentimelineio/console/otiotool.py | 16 +- 4 files changed, 475 insertions(+), 5 deletions(-) create mode 100644 src/py-opentimelineio/opentimelineio/console/otiodiff/__init__.py create mode 100644 src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py create mode 100644 src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/__init__.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py new file mode 100644 index 000000000..b2826e850 --- /dev/null +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -0,0 +1,123 @@ +import opentimelineio as otio + +class ClipData: + name = "" + take = None + media_ref = None + source_range = otio.opentime.TimeRange() + timeline_range = otio.opentime.TimeRange() + note = "" + source = otio.schema.Clip() + pair = None + + def __init__(self, name, media_ref, source_range, timeline_range, source, take=None, note=None): + self.name = name + self.media_ref = media_ref + self.source_range = source_range + self.timeline_range = timeline_range + self.source = source + self.take = take + self.note = note + + def printData(self): + print("name: ", self.name) + print("take: ", self.take) + print("media ref: ", self.media_ref) + print("source start time: ", self.source_range.start_time.value, " duration: ", self.source_range.duration.value) + print("timeline start time:", self.timeline_range.start_time.value, " duration: ", self.timeline_range.duration.value) + if(self.note != ""): + print("note: ", self.note) + print("source clip: ", self.source.name) + + # compare truncated names + def sameName(self, cA): + if(self.name.lower() == cA.name.lower()): + return True + else: + return False + + # note: local and source duration should always match, can assume same + # compare the duration within the timeline for 2 clips + def sameDuration(self, cA): + if(self.timeline_range.duration.value == cA.timeline_range.duration.value): + return True + else: + return False + + # compare 2 clips and see if they are the exact same, whether exact or moved along + # the timeline + def checkSame(self, cA): + isSame = False + # check names are same + if(self.sameName(cA)): + # check source range is same + if(self.source_range == cA.source_range): + # print(self.name, " ", self.timeline_range, " ", 
cA.timeline_range)
+            # check in same place on timeline
+            if(self.timeline_range == cA.timeline_range):
+                isSame = True
+            # check duration is same but not necessarily in same place on timeline
+            elif(self.sameDuration(cA)):
+                # Note: maybe also check in relation to the clips on the left and right?
+                # that would distinguish a clip that moved within the sequence from one
+                # that only shifted because other clips were lengthened/shortened
+                isSame = True
+                self.note = "moved"
+            else:
+                # print("source range different", cA.name, self.name)
+                # print(self.media_ref)
+                # print(self.media_ref.target_url)
+                pass
+
+        return isSame
+
+    # compare 2 clips and see if they have been edited
+    # compares self (the "new" clip, from B) against cA (the "old" clip, from A)
+    def checkEdited(self, cA):
+        isEdited = False
+
+        # Note: assumes that source range and timeline range durations are always equal
+        assert(self.source_range.duration.value == self.timeline_range.duration.value), "clip source range and timeline range durations don't match"
+        assert(cA.source_range.duration.value == cA.timeline_range.duration.value), "clip source range and timeline range durations don't match"
+
+        selfDur = self.source_range.duration
+        cADur = cA.source_range.duration
+
+        selfSourceStart = self.source_range.start_time
+        cASourceStart = cA.source_range.start_time
+
+        # clip duration same but referencing different areas of the same media
+        # if selfDur.value == cADur.value:
+        #     if (self.source_range.start_time != cA.source_range.start_time):
+        #         self.note = "source range start times differ"
+        #         isEdited = True
+
+        if(self.source_range != cA.source_range):
+            self.note = "source range changed"
+            isEdited = True
+            deltaFramesStr = str(abs(selfDur.to_frames() - cADur.to_frames()))
+
+            if(selfDur.value == cADur.value):
+                self.note = "start time changed"
+
+            # clip duration shorter
+            elif(selfDur.value < cADur.value):
+                self.note = "trimmed " + deltaFramesStr + " frames"
+
+                if(selfSourceStart.value == cASourceStart.value):
+                    self.note = "trimmed tail by " + deltaFramesStr + " frames"
+                # the new clip starts later in its media, so the head was trimmed
+                elif(selfSourceStart.value > cASourceStart.value):
+                    self.note = "trimmed head by " + deltaFramesStr + " frames"
+
+            # clip duration longer
+            elif(selfDur.value > cADur.value):
+                self.note = "lengthened " + deltaFramesStr + " frames"
+
+                if(selfSourceStart.value == cASourceStart.value):
+                    self.note = "lengthened tail by " + deltaFramesStr + " frames"
+                # the new clip starts earlier in its media, so the head was lengthened
+                elif(selfSourceStart.value < cASourceStart.value):
+                    self.note = "lengthened head by " + deltaFramesStr + " frames"
+
+        return isEdited
\ No newline at end of file
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
new file mode 100644
index 000000000..83b0ba2fd
--- /dev/null
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
@@ -0,0 +1,341 @@
+import opentimelineio as otio
+import copy
+from .clipData import ClipData
+
+# for debugging, write the given string out to a file
+def toJson(file):
+    with open("clipDebug.json", "w") as f:
+        f.write(file)
+
+def toTimeline(tracks, timeline=None):
+    tl = timeline
+
+    if tl is None:
+        tl = otio.schema.Timeline(name="timeline")
+
+    for t in tracks:
+        tl.tracks.append(t)
+
+    return tl
+
+def toOtio(file):
+    otio.adapters.write_to_file(file, "display.otio")
+
+# input is a list of ClipDatas
+def sortClips(trackClips):
+    # sort by clip start time in the timeline
+    return sorted(trackClips, key=lambda clipData: clipData.timeline_range.start_time.value)
+
+# @params: clip: otio Clip
+def 
addRavenColor(clip, color): + # print(clip.metadata) + + if "raven" in clip.metadata: + clip.metadata["raven"]["color"] = color.upper() + else: + colorData = {"color" : color.upper()} + clip.metadata["raven"] = colorData + + # debug + # toJson(otio.adapters.write_to_string(clip.metadata)) + return clip + +def addMarker(newClip, color, clipData): + newMarker = otio.schema.Marker() + newMarker.marked_range = clipData.source_range + color = color.upper() + newMarker.color = color + + if isinstance(clipData, ClipData) and clipData.note is not None: + print("edit note added") + newMarker.name = clipData.note + + if(color == "GREEN"): + newMarker.name = "added" + elif(color == "PINK"): + newMarker.name = "deleted" + + newClip.markers.append(newMarker) + + return newClip + + +def makeEmptyTrack(): + return otio.schema.Track(name="=====================") + + +def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): + # make new blank track with name of kind + track = otio.schema.Track(name=trackName, kind=trackKind) + + # sort clips by start time in timeline + sortedClips = sortClips(trackClips) + + currentEnd = 0 + # add clip to timeline + for clipData in sortedClips: + if clipData is not None: + # add gap if necessary + tlStart = clipData.timeline_range.start_time.value + tlDuration = clipData.timeline_range.duration.value + tlRate = clipData.timeline_range.start_time.rate + + delta = tlStart - currentEnd + + if(delta > 0): + gapDur = otio.opentime.RationalTime(delta, tlRate) + gap = otio.schema.Gap(duration = gapDur) + track.append(gap) + + currentEnd = tlStart + tlDuration + # print("new end: ", currentEnd) + else: + currentEnd += tlDuration + + # add clip to track + newClip = copy.deepcopy(clipData.source) + if clipColor is not None: + newClip = addRavenColor(newClip, clipColor) + if markersOn: + newClip = addMarker(newClip, clipColor, clipData) + track.append(newClip) + + return track + +def makeTrackB(videoGroup, trackNum, audioGroup=None): + tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE", markersOn=True) + tSameV = makeTrack("same", "Video", videoGroup.same) + + flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + flat_videoB.name = "Video B" + str(trackNum) + + return flat_videoB + +def makeTrackA(videoGroup, trackNum, audioGroup=None): + tSameV = makeTrack("same", "Video", videoGroup.same) + # grab the original pair from all the edit clipDatas + + actualEdited = [] + for e in videoGroup.edit: + actualEdited.append(e.pair) + tEditedV = makeTrack("edited", "Video", actualEdited, "ORANGE") + + tDelV = makeTrack("deleted", "Video", videoGroup.delete, "PINK") + + flat_videoA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) + flat_videoA.name = "Video A" + str(trackNum) + + return flat_videoA + +def makeTimelineOfType(tlType, trackA, trackB, videoGroup, audioGroup=None): + newTl = None + + if tlType == "stack": + newTl = makeTimelineStack(trackA, trackB, videoGroup, audioGroup) + elif tlType == "inline": + newTl = makeTimelineInline(trackA, trackB, videoGroup, audioGroup) + elif tlType == "full": + newTl = makeTimelineFull(trackA, trackB, videoGroup, audioGroup) + elif tlType == "simple": + newTl = makeTimelineSimple(trackA, trackB, videoGroup, audioGroup) + else: + print("not a valid display type") + return newTl + +def makeTimelineStack(trackA, trackB, videoGroup, audioGroup=None): + # create new timeline with groups separated out into individual tracks + tl = 
otio.schema.Timeline(name="timeline") + + # append two original tracks + trackA.name = "Track A" + trackA.name + trackB.name = "Track B" + trackB.name + tl.tracks.append(copy.deepcopy(trackA)) + tl.tracks.append(copy.deepcopy(trackB)) + + tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE") + tSameV = makeTrack("same", "Video", videoGroup.same) + tDelV = makeTrack("deleted", "Video", videoGroup.delete, "RED") + + # append video tracks to timeline + tl.tracks.append(tDelV) + tl.tracks.append(tSameV) + tl.tracks.append(tEditedV) + tl.tracks.append(tAddV) + + # add audio tracks if present + if audioGroup is not None: + tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") + tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") + tSameA = makeTrack("same", "Audio", audioGroup.same) + tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") + + # append video tracks to timeline + tl.tracks.append(tAddA) + tl.tracks.append(tEditedA) + tl.tracks.append(tSameA) + tl.tracks.append(tDelA) + + return tl + +# note: flatten_stack doesn't work when there's transitions +def makeTimelineInline(trackA, trackB, clipGroup, audioGroup=None): + tl = otio.schema.Timeline(name="timeline") + + tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") + tSameV = makeTrack("same", "Video", clipGroup.same) + tDelV = makeTrack("deleted", "Video", clipGroup.delete, "RED") + + flat_videoA = otio.core.flatten_stack([copy.deepcopy(trackA), tDelV]) + flat_videoA.name = "VideoA" + tl.tracks.append(flat_videoA) + + flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + flat_videoB.name = "VideoB" + tl.tracks.append(flat_videoB) + + # add audio tracks if present + if audioGroup is not None: + tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") + tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") + tSameA = makeTrack("same", "Audio", audioGroup.same) + tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") + + flat_audioA = otio.core.flatten_stack([tSameA, tDelA]) + flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) + + flat_audioA.name = "AudioA" + flat_audioB.name = "AudioB" + flat_audioA.kind = "Audio" + flat_audioB.kind = "Audio" + + # append audio tracks to timeline + tl.tracks.append(flat_audioB) + tl.tracks.append(flat_audioA) + + return tl + +def makeDeletes(tl, tracksOfDeletes): + for t in tracksOfDeletes: + tl.tracks.insert(0, t) + return tl + +# note: flatten_stack doesn't work when there's transitions +def makeTimelineSimple(trackA, trackB, clipGroup, audioGroup=None): + tl = otio.schema.Timeline(name="timeline") + + tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") + tSameV = makeTrack("same", "Video", clipGroup.same) + tDelV = makeTrack("deleted", "Video", clipGroup.delete, "PINK") + + tl.tracks.append(tDelV) + + flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + flat_videoB.name = "VideoB" + tl.tracks.append(flat_videoB) + + # commented out for now + # # add audio tracks if present + # if audioGroup is not None: + # tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") + # tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") + # tSameA = makeTrack("same", "Audio", audioGroup.same) + # tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") + + # flat_audioA = 
otio.core.flatten_stack([tSameA, tDelA]) + # flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) + + # flat_audioA.name = "AudioA" + # flat_audioB.name = "AudioB" + # flat_audioA.kind = "Audio" + # flat_audioB.kind = "Audio" + + # # append audio tracks to timeline + # tl.tracks.append(flat_audioB) + # tl.tracks.append(flat_audioA) + + return tl + +def makeTimelineSplitDelete(trackA, trackB, clipGroup, audioGroup=None): + tl = otio.schema.Timeline(name="timeline") + + tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") + tSameV = makeTrack("same", "Video", clipGroup.same) + tDelV = makeTrack("deleted", "Video", clipGroup.delete, "PINK") + + + for e in clipGroup.edit(): + pass + + + flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + flat_videoB.name = "VideoB" + tl.tracks.append(flat_videoB) + + # commented out for now + # # add audio tracks if present + # if audioGroup is not None: + # tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") + # tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") + # tSameA = makeTrack("same", "Audio", audioGroup.same) + # tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") + + # flat_audioA = otio.core.flatten_stack([tSameA, tDelA]) + # flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) + + # flat_audioA.name = "AudioA" + # flat_audioB.name = "AudioB" + # flat_audioA.kind = "Audio" + # flat_audioB.kind = "Audio" + + # # append audio tracks to timeline + # tl.tracks.append(flat_audioB) + # tl.tracks.append(flat_audioA) + + return tl, tDelV + +def makeTimelineFull(trackA, trackB, videoGroup, audioGroup=None): + tl = otio.schema.Timeline(name="timeline") + + tlFlat = makeTimelineInline(trackA, trackB, videoGroup, audioGroup) + + tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") + tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE") + tDelV = makeTrack("deleted", "Video", videoGroup.delete, "RED") + + # append video tracks to timeline + tl.tracks.append(tDelV) + + # temp testing + tl.tracks.append(copy.deepcopy(trackA)) + tl.tracks.append(copy.deepcopy(trackB)) + + # temp comment + # tlFlatVid = tlFlat.video_tracks() + # for v in tlFlatVid: + # tl.tracks.append(copy.deepcopy(v)) + + tl.tracks.append(tEditedV) + tl.tracks.append(tAddV) + + # add audio tracks if present + if audioGroup is not None: + tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") + tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") + tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") + + # append video tracks to timeline + tl.tracks.append(tAddA) + tl.tracks.append(tEditedA) + + tlFlatAud = tlFlat.audio_tracks() + for a in tlFlatAud: + tl.tracks.append(copy.deepcopy(a)) + + tl.tracks.append(tDelA) + + return tl diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index 7c47df21d..9b7d4ac83 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -23,8 +23,12 @@ import opentimelineio as otio +# sys.path.append("src/py-opentimelineio/opentimelineio/console/otiodiff") + +from .otiodiff import getDif def main(): + """otiotool main program. This function is responsible for executing the steps specified by all of the command line arguments in the right order. 
@@ -115,8 +119,10 @@ def main(): # ===== NEW Phase 5.5: Diff otio files ====== if args.diff: - print("got diff from args") - diff_otio() + print("got diff from args, using tl:", timelines[0].name, timelines[1].name) + + # function that serves as wrapper to call actual getDif main + diff_otio(timelines[0], timelines[1]) # Phase 6: Remove/Redaction @@ -452,9 +458,8 @@ def parse_arguments(): # NEW ============== parser.add_argument( "--diff", - "-d", action="store_true", - help="""Diff and compare two otio files""" + help="""Diff and compare two otio files. Input file type must be .otio""" ) # ================== @@ -498,8 +503,9 @@ def read_inputs(input_paths): # ======= NEW ======= -def diff_otio(): +def diff_otio(tlA, tlB): print("hello world from diff otio") + getDif.main(tlA, tlB) # =================== From 814c2611a5f4c16c16a60cc25db5aca497467007 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Wed, 6 Aug 2025 11:03:24 -0700 Subject: [PATCH 04/30] updated file output of otiodiff to use output of otiotool Signed-off-by: Yingjie Wang --- src/py-opentimelineio/opentimelineio/console/otiotool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index 9b7d4ac83..c59ca88a3 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -122,7 +122,7 @@ def main(): print("got diff from args, using tl:", timelines[0].name, timelines[1].name) # function that serves as wrapper to call actual getDif main - diff_otio(timelines[0], timelines[1]) + timelines = [diff_otio(timelines[0], timelines[1])] # Phase 6: Remove/Redaction @@ -505,7 +505,7 @@ def read_inputs(input_paths): def diff_otio(tlA, tlB): print("hello world from diff otio") - getDif.main(tlA, tlB) + return getDif.main(tlA, tlB) # =================== From 2eaae82028c6a80fda69ce3665388d8e418da8e8 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 7 Aug 2025 10:52:04 -0700 Subject: [PATCH 05/30] comments cleanup Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 41 +------------------ .../console/otiodiff/makeOtio.py | 4 +- 2 files changed, 4 insertions(+), 41 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index a22fa8883..2d9db40e2 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -11,37 +11,7 @@ # set otio version to 0.17 os.environ["OTIO_DEFAULT_TARGET_VERSION_FAMILY_LABEL"] = "OTIO_CORE:0.17.0" -def main(fileA, fileB): - - # parser = argparse.ArgumentParser(description="compare two .otio files with flattened video tracks (one video track only)") - # parser.add_argument("fileA", metavar="fileA", type=str, help="file path to otio file") - # parser.add_argument("fileB", metavar="fileB", type=str, help="file path to otio file") - # parser.add_argument("--display", metavar="display", type=str, help="Specify how the new otio file displays info. 
Options: 'stack', 'inline', or 'full'") - # parser.add_argument("--flatten", action='store_true', help="Toggle to flatten input files") - - # args = parser.parse_args() - - # assert(fileA[-5:] == ".otio"), "File A is not an otio file" - # assert(fileB[-5:] == ".otio"), "File B is not an otio file" - - # tlA = otio.adapters.read_from_file(fileA) - # tlB = otio.adapters.read_from_file(fileB) - tlA = fileA - tlB = fileB - - # old implmentation - # videoTl = processSingleTrack(tlA, tlB) - - # new implementation that can process inputs with multiple tracks - # displayMode = None - - # if args.display is not None: - # displayMode = args.display.lower() - # displaySettings = ("inline", "stack", "full", "simple") - # if displayMode not in displaySettings: - # print("Not a recognized display mode, defaulting to 'simple'.") - # displayMode = "simple" - +def main(tlA, tlB): # videoTl = processAllTracks(tlA, tlB, "video", displayMode) videoTl = processAllTracksAB(tlA, tlB) @@ -64,7 +34,6 @@ def main(fileA, fileB): print(len(videoTl.find_clips())) # assert(len(tlA.find_clips()) + len(tlB.find_clips()) == len(videoTl.find_clips())), "Clip count doesn't match across two timelines" - # commented out display for now return videoTl def toOtio(data, path): @@ -344,7 +313,6 @@ def compareTracks(trackA, trackB): # return addV, editV, sameV, deleteV return videoGroup -# ============================= NEW FOR MULTITRACK ============================= def processAllTracks(tlA, tlB, trackType, displayMode): # determine which track set is shorter assert(trackType is not None), "Missing type of track in function call" @@ -426,11 +394,8 @@ def processAllTracks(tlA, tlB, trackType, displayMode): return newTl -# maybe just loop through all of the tracks in A and then all of the tracks in B?? -# see if can simplify organization def processAllTracksAB(tlA, tlB): # determine which track set is shorter - # TODO add check that timeline track length is not 0 tracksA = tlA.video_tracks() tracksB = tlB.video_tracks() @@ -440,7 +405,7 @@ def processAllTracksAB(tlA, tlB): shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB - print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) # Process Matched Video Tracks # index through all the video tracks of the timeline with less tracks @@ -537,8 +502,6 @@ def makeSummary(tlA, tlB, videoGroup): print(c.name) print("=======") - -# TODO: add a flatten flag def processSingleTrack(tlA, tlB): assert len(tlA.video_tracks()) == 1, "File A contains more than 1 video track. Please flatten to a single track." assert len(tlB.video_tracks()) == 1, "File B contains more than 1 video track. Please flatten to a single track." 
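For reference, a minimal sketch of driving the diff programmatically, assuming only the
module layout added in this series ("cutA.otio" and "cutB.otio" are hypothetical paths);
the otiotool --diff path above is the equivalent, reading its inputs via -i/--input and
writing the returned timeline out via -o/--output:

    import opentimelineio as otio
    from opentimelineio.console.otiodiff import getDif

    # read the two cuts, diff them, and write out the combined
    # display timeline that getDif.main() returns
    tlA = otio.adapters.read_from_file("cutA.otio")
    tlB = otio.adapters.read_from_file("cutB.otio")
    diffTl = getDif.main(tlA, tlB)
    otio.adapters.write_to_file(diffTl, "diff_display.otio")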
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 83b0ba2fd..0975c963b 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -108,7 +108,7 @@ def makeTrackB(videoGroup, trackNum, audioGroup=None): tSameV = makeTrack("same", "Video", videoGroup.same) flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) - flat_videoB.name = "Video B" + str(trackNum) + flat_videoB.name = str(trackNum) + "Video B" return flat_videoB @@ -124,7 +124,7 @@ def makeTrackA(videoGroup, trackNum, audioGroup=None): tDelV = makeTrack("deleted", "Video", videoGroup.delete, "PINK") flat_videoA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) - flat_videoA.name = "Video A" + str(trackNum) + flat_videoA.name = str(trackNum) + "Video A" return flat_videoA From d6efa99873aeafc4b1ab44f72322887ebf69f251 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 7 Aug 2025 13:59:55 -0700 Subject: [PATCH 06/30] added track type to processTracks and makeTrack to support timelines with video only, audio only, or both video and audio Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 555 +++++++++++------- .../console/otiodiff/makeOtio.py | 45 +- 2 files changed, 360 insertions(+), 240 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index 2d9db40e2..54649a7b1 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -12,9 +12,38 @@ os.environ["OTIO_DEFAULT_TARGET_VERSION_FAMILY_LABEL"] = "OTIO_CORE:0.17.0" def main(tlA, tlB): + hasVideo = False + hasAudio = False + + if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: + hasVideo = True + else: + print("no video tracks") + + if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: + hasAudio = True + else: + print("no audio tracks") + + + outputTl = None + + if hasVideo and hasAudio: + videoTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + for t in videoTl.tracks: + outputTl.tracks.append(copy.deepcopy(t)) + # combine + elif hasVideo: + outputTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + + elif hasAudio: + outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + + # videoTl = processAllTracks(tlA, tlB, "video", displayMode) - videoTl = processAllTracksAB(tlA, tlB) + # videoTl = processAllTracksAB(tlA, tlB) # # audio only # audioTl = processAllTracks(tlA, tlB, "audio") @@ -31,10 +60,10 @@ def main(tlA, tlB): origClipCount += len(t.find_clips()) print(origClipCount) - print(len(videoTl.find_clips())) + print(len(outputTl.find_clips())) # assert(len(tlA.find_clips()) + len(tlB.find_clips()) == len(videoTl.find_clips())), "Clip count doesn't match across two timelines" - return videoTl + return outputTl def toOtio(data, path): otio.adapters.write_to_file(data, path) @@ -174,86 +203,6 @@ def compareClips(clipDatasA, clipDatasB): return added, edited, unchanged, deleted -def processVideo(videoTrackA, videoTrackB): - clipDatasA = [] - clipDatasB = [] - - for c in videoTrackA.find_clips(): - take = None - if(len(c.name.split(" ")) > 1): - take = c.name.split(" ")[1] - else: - take = None - cd = 
ClipData(c.name.split(" ")[0], - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c, - take) - clipDatasA.append(cd) - - for c in videoTrackB.find_clips(): - take = None - if(len(c.name.split(" ")) > 1): - take = c.name.split(" ")[1] - else: - take = None - cd = ClipData(c.name.split(" ")[0], - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c, - take) - clipDatasB.append(cd) - - (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) - - # compare clips and put into categories - addV = [] - editV = [] - sameV = [] - deleteV = [] - - # compare and categorize unique clips - addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB) - - # compare and categorize cloned clips - addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB) - addV.extend(addCloneV) - sameV.extend(sameCloneV) - deleteV.extend(deleteCloneV) - - return addV, editV, sameV, deleteV - -def processAudio(audioTrackA, audioTrackB): - addA = [] - editA = [] - sameA = [] - deleteA = [] - - audioClipDatasA = [] - audioClipDatasB = [] - - for c in audioTrackA.find_clips(): - cd = ClipData(c.name, - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c) - audioClipDatasA.append(cd) - - for c in audioTrackB.find_clips(): - cd = ClipData(c.name, - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c) - audioClipDatasB.append(cd) - - addA, editA, sameA, deleteA = compareClips(audioClipDatasA, audioClipDatasB) - - return addA, editA, sameA, deleteA - # clip is an otio Clip def getTake(clip): take = None @@ -313,83 +262,60 @@ def compareTracks(trackA, trackB): # return addV, editV, sameV, deleteV return videoGroup -def processAllTracks(tlA, tlB, trackType, displayMode): - # determine which track set is shorter - assert(trackType is not None), "Missing type of track in function call" - # TODO add check that timeline track length is not 0 - - tracksA = None - tracksB = None +def processTracks(tracksA, tracksB, trackType): newTl = otio.schema.Timeline(name="timeline") - tempB = otio.schema.Timeline(name="timeline") - - if(trackType.lower() == "video"): - tracksA = tlA.video_tracks() - tracksB = tlB.video_tracks() - elif(trackType.lower() == "audio"): - tracksA = tlA.audio_tracks() - tracksB = tlB.audio_tracks() - elif(trackType.lower() == "all"): - print("show both video and audio") + displayA = [] + displayB = [] shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB - print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) - # Process Matched Video Tracks - # index through all the video tracks of the timeline with less tracks - tracksOfDels = [] + # Process Matched Tracks + # index through all the tracks of the timeline with less tracks for i in range(0, len(shorterTlTracks)): currTrackA = tracksA[i] currTrackB = tracksB[i] - videoGroup = compareTracks(currTrackA, currTrackB) - - # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + clipGroup = compareTracks(currTrackA, currTrackB) - # add processed tracks to display timeline - getTl = None - if displayMode is None: - print("Warning: Display mode not specified, defaulting to inline") - getTl = makeOtio.makeTimelineOfType("simple", currTrackA, currTrackB, videoGroup) - else: - # getTl = makeOtio.makeTimelineOfType(displayMode, currTrackA, currTrackB, videoGroup) - - # split delete out - getTl, tDelV = makeOtio.makeTimelineSplitDelete(currTrackA, currTrackB, 
videoGroup) - tracksOfDels.insert(0, tDelV) + trackNum = i + 1 + newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) + displayA.append(newA) - for t in getTl.tracks: - newTl.tracks.append(copy.deepcopy(t)) - print("current track stack size:", len(newTl.tracks)) + newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) + displayB.append(newB) - # Process Unmatched Video Tracks - # mark unmatched tracks as either "added" or "deleted" and add to display timeline + # Process Unmatched Tracks if shorterTlTracks == tracksA: # tlA is shorter so tlB has added tracks for i in range(len(shorterTlTracks), len(tracksB)): newTrack = tracksB[i] + newTrack.name = trackType + " B" + str(i + 1) for c in newTrack.find_clips(): c = makeOtio.addRavenColor(c, "GREEN") + # newMarker = makeOtio.addMarker(c, "GREEN") + # c.markers.append(newMarker) # add to top of track stack - newTl.tracks.append(copy.deepcopy(newTrack)) - print("added unmatched track", len(newTl.tracks)) + displayB.append(copy.deepcopy(newTrack)) + # print("added unmatched track", len(newTl.tracks)) else: for i in range(len(shorterTlTracks), len(tracksA)): # color clips newTrack = tracksA[i] + newTrack.name = trackType + " A" + str(i + 1) for c in newTrack.find_clips(): c = makeOtio.addRavenColor(c, "PINK") + # newMarker = makeOtio.addMarker(c, "PINK") + # c.markers.append(newMarker) + displayA.append(copy.deepcopy(newTrack)) - # add to bottom of track stack - # newTl.tracks.append(copy.deepcopy(newTrack)) - - # split delete out - # tracksOfDels.insert(0, newTrack) - - print("added unmatched track", len(newTl.tracks)) + newTl.tracks.extend(displayA) - makeOtio.makeDeletes(newTl, tracksOfDels) + newEmpty = makeOtio.makeEmptyTrack(trackType) + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayB) return newTl @@ -397,6 +323,27 @@ def processAllTracks(tlA, tlB, trackType, displayMode): def processAllTracksAB(tlA, tlB): # determine which track set is shorter + hasVideo = False + hasAudio = False + + if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: + hasVideo = True + else: + print("no video tracks") + + if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: + hasAudio = True + else: + print("no audio tracks") + + + if hasVideo: + processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + + if hasAudio: + processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + + tracksA = tlA.video_tracks() tracksB = tlB.video_tracks() newTl = otio.schema.Timeline(name="timeline") @@ -416,10 +363,10 @@ def processAllTracksAB(tlA, tlB): videoGroup = compareTracks(currTrackA, currTrackB) trackNum = i + 1 - newA = makeOtio.makeTrackA(videoGroup, trackNum) + newA = makeOtio.makeTrackA(videoGroup, trackNum, "Video") displayA.append(newA) - newB = makeOtio.makeTrackB(videoGroup, trackNum) + newB = makeOtio.makeTrackB(videoGroup, trackNum, "Video") displayB.append(newB) if shorterTlTracks == tracksA: @@ -427,9 +374,9 @@ def processAllTracksAB(tlA, tlB): for i in range(len(shorterTlTracks), len(tracksB)): newTrack = tracksB[i] for c in newTrack.find_clips(): - # c = makeOtio.addRavenColor(c, "GREEN") - newMarker = makeOtio.addMarker(c, "GREEN") - c.markers.append(newMarker) + c = makeOtio.addRavenColor(c, "GREEN") + # newMarker = makeOtio.addMarker(c, "GREEN") + # c.markers.append(newMarker) # add to top of track stack displayB.append(copy.deepcopy(newTrack)) @@ -439,9 +386,9 @@ def processAllTracksAB(tlA, tlB): # color clips newTrack = tracksA[i] for c in newTrack.find_clips(): - # c = makeOtio.addRavenColor(c, 
"PINK") - newMarker = makeOtio.addMarker(c, "PINK") - c.markers.append(newMarker) + c = makeOtio.addRavenColor(c, "PINK") + # newMarker = makeOtio.addMarker(c, "PINK") + # c.markers.append(newMarker) displayA.append(copy.deepcopy(newTrack)) newTl.tracks.extend(displayA) @@ -479,7 +426,7 @@ def makeSummary(tlA, tlB, videoGroup): # print(k, ":", len(clonesB[k])) - print("======= Video Clip Info Overview =======") + print("======= Clip Info Overview =======") print("added: ", len(videoGroup.add)) for c in videoGroup.add: print(c.name) @@ -502,86 +449,6 @@ def makeSummary(tlA, tlB, videoGroup): print(c.name) print("=======") -def processSingleTrack(tlA, tlB): - assert len(tlA.video_tracks()) == 1, "File A contains more than 1 video track. Please flatten to a single track." - assert len(tlB.video_tracks()) == 1, "File B contains more than 1 video track. Please flatten to a single track." - - videoTrackA = tlA.video_tracks()[0] - videoTrackB = tlB.video_tracks()[0] - - # check for nested video tracks and stacks - assert(not videoTrackA.find_children(otio._otio.Track)), "File A contains nested track(s). Please flatten to a single track." - # assert(not videoTrackA.find_children(otio._otio.Stack)), "File A contains nested stack(s). Please flatten to a single track." - assert(not videoTrackB.find_children(otio._otio.Track)), "File B contains nested track(s). Please flatten to a single track." - # assert(not videoTrackB.find_children(otio._otio.Stack)), "File B contains nested stack(s). Please flatten to a single track." - - - # ====== VIDEO TRACK PROCESSING ====== - addV, editV, sameV, deleteV = processVideo(videoTrackA, videoTrackB) - - # ====== AUDIO TRACK PROCESSING ====== - # check if audio tracks exist - hasAudio = False - - if(len(tlA.audio_tracks()) != 0): - assert len(tlA.audio_tracks()) == 1, "File A contains more than 1 audio track" - hasAudio = True - if(len(tlB.audio_tracks()) != 0): - assert len(tlB.audio_tracks()) == 1, "File B contains more than 1 audio track" - hasAudio = True - - # if audio track(s) present, compare audio track(s) - if(hasAudio): - audioTrackA = tlA.audio_tracks()[0] - audioTrackB = tlB.audio_tracks()[0] - - addA, editA, sameA, deleteA = processAudio(audioTrackA, audioTrackB) - - # ====== MAKE NEW OTIO ====== - SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) - videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) - - # check which display mode is toggled - if(args.display is None): - print("no display mode specified, defaulting to inline") - flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) - toOtio(flatTl) - - # multi-track output - elif(args.display.lower() == "stack"): - print("display mode: stack") - if(hasAudio): - audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) - stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup, audioGroup) - else: - stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup) - toOtio(stackTl) - - # single-track output - elif(args.display.lower() == "inline"): - print("display mode: inline") - if(hasAudio): - audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) - flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup, audioGroup) - - # flat track output - else: - flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) - toOtio(flatTl) - - # both multi and single track output - elif(args.display.lower() == "full"): - print("display mode: full") - if(hasAudio): - audioGroup = 
SortedClipDatas(addA, editA, sameA, deleteA) - fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup, audioGroup) - else: - fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup) - toOtio(fullTl) - - else: - print("not an accepted display mode, no otios made") - if __name__ == "__main__": main() @@ -613,4 +480,246 @@ def processSingleTrack(tlA, tlB): Test shot multitrack: python ./src/getDif.py /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio -''' \ No newline at end of file +''' + +# def processAllTracks(tlA, tlB, trackType, displayMode): +# # determine which track set is shorter +# assert(trackType is not None), "Missing type of track in function call" +# # TODO add check that timeline track length is not 0 + +# tracksA = None +# tracksB = None +# newTl = otio.schema.Timeline(name="timeline") +# tempB = otio.schema.Timeline(name="timeline") + +# if(trackType.lower() == "video"): +# tracksA = tlA.video_tracks() +# tracksB = tlB.video_tracks() +# elif(trackType.lower() == "audio"): +# tracksA = tlA.audio_tracks() +# tracksB = tlB.audio_tracks() +# elif(trackType.lower() == "all"): +# print("show both video and audio") + +# shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB +# print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + +# # Process Matched Video Tracks +# # index through all the video tracks of the timeline with less tracks +# tracksOfDels = [] +# for i in range(0, len(shorterTlTracks)): +# currTrackA = tracksA[i] +# currTrackB = tracksB[i] + +# videoGroup = compareTracks(currTrackA, currTrackB) + +# # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + +# # add processed tracks to display timeline +# getTl = None +# if displayMode is None: +# print("Warning: Display mode not specified, defaulting to inline") +# getTl = makeOtio.makeTimelineOfType("simple", currTrackA, currTrackB, videoGroup) +# else: +# # getTl = makeOtio.makeTimelineOfType(displayMode, currTrackA, currTrackB, videoGroup) + +# # split delete out +# getTl, tDelV = makeOtio.makeTimelineSplitDelete(currTrackA, currTrackB, videoGroup) +# tracksOfDels.insert(0, tDelV) + +# for t in getTl.tracks: +# newTl.tracks.append(copy.deepcopy(t)) +# print("current track stack size:", len(newTl.tracks)) + +# # Process Unmatched Video Tracks +# # mark unmatched tracks as either "added" or "deleted" and add to display timeline +# if shorterTlTracks == tracksA: +# # tlA is shorter so tlB has added tracks +# for i in range(len(shorterTlTracks), len(tracksB)): +# newTrack = tracksB[i] +# for c in newTrack.find_clips(): +# c = makeOtio.addRavenColor(c, "GREEN") + +# # add to top of track stack +# newTl.tracks.append(copy.deepcopy(newTrack)) +# print("added unmatched track", len(newTl.tracks)) +# else: +# for i in range(len(shorterTlTracks), len(tracksA)): +# # color clips +# newTrack = tracksA[i] +# for c in newTrack.find_clips(): +# c = makeOtio.addRavenColor(c, "PINK") + +# # add to bottom of track stack +# # newTl.tracks.append(copy.deepcopy(newTrack)) + +# # split delete out +# # tracksOfDels.insert(0, newTrack) + +# print("added unmatched track", len(newTl.tracks)) + +# makeOtio.makeDeletes(newTl, tracksOfDels) + +# return newTl + + +# def processVideo(videoTrackA, videoTrackB): +# clipDatasA = [] +# clipDatasB = [] + +# for c in videoTrackA.find_clips(): +# take = None +# if(len(c.name.split(" ")) > 1): +# take = c.name.split(" ")[1] +# else: +# 
take = None +# cd = ClipData(c.name.split(" ")[0], +# c.media_reference, +# c.source_range, +# c.trimmed_range_in_parent(), +# c, +# take) +# clipDatasA.append(cd) + +# for c in videoTrackB.find_clips(): +# take = None +# if(len(c.name.split(" ")) > 1): +# take = c.name.split(" ")[1] +# else: +# take = None +# cd = ClipData(c.name.split(" ")[0], +# c.media_reference, +# c.source_range, +# c.trimmed_range_in_parent(), +# c, +# take) +# clipDatasB.append(cd) + +# (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) + +# # compare clips and put into categories +# addV = [] +# editV = [] +# sameV = [] +# deleteV = [] + +# # compare and categorize unique clips +# addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB) + +# # compare and categorize cloned clips +# addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB) +# addV.extend(addCloneV) +# sameV.extend(sameCloneV) +# deleteV.extend(deleteCloneV) + +# return addV, editV, sameV, deleteV + +# def processAudio(audioTrackA, audioTrackB): +# addA = [] +# editA = [] +# sameA = [] +# deleteA = [] + +# audioClipDatasA = [] +# audioClipDatasB = [] + +# for c in audioTrackA.find_clips(): +# cd = ClipData(c.name, +# c.media_reference, +# c.source_range, +# c.trimmed_range_in_parent(), +# c) +# audioClipDatasA.append(cd) + +# for c in audioTrackB.find_clips(): +# cd = ClipData(c.name, +# c.media_reference, +# c.source_range, +# c.trimmed_range_in_parent(), +# c) +# audioClipDatasB.append(cd) + +# addA, editA, sameA, deleteA = compareClips(audioClipDatasA, audioClipDatasB) + +# return addA, editA, sameA, deleteA + + +# def processSingleTrack(tlA, tlB): +# assert len(tlA.video_tracks()) == 1, "File A contains more than 1 video track. Please flatten to a single track." +# assert len(tlB.video_tracks()) == 1, "File B contains more than 1 video track. Please flatten to a single track." + +# videoTrackA = tlA.video_tracks()[0] +# videoTrackB = tlB.video_tracks()[0] + +# # check for nested video tracks and stacks +# assert(not videoTrackA.find_children(otio._otio.Track)), "File A contains nested track(s). Please flatten to a single track." +# # assert(not videoTrackA.find_children(otio._otio.Stack)), "File A contains nested stack(s). Please flatten to a single track." +# assert(not videoTrackB.find_children(otio._otio.Track)), "File B contains nested track(s). Please flatten to a single track." +# # assert(not videoTrackB.find_children(otio._otio.Stack)), "File B contains nested stack(s). Please flatten to a single track." 
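+#
+# Note: processSingleTrack only works on pre-flattened, single-track input,
+# which is what the asserts above enforce. As a minimal sketch (not part of
+# this patch), a caller could flatten a multi-track timeline first with
+# otio.core.flatten_stack, the same call makeOtio.py uses to collapse a
+# list of tracks into one; flattenVideo is a hypothetical helper name:
+#
+#     def flattenVideo(tl):
+#         # collapse all of tl's video tracks into a single track
+#         flat = otio.core.flatten_stack(tl.video_tracks())
+#         flatTl = otio.schema.Timeline(name=tl.name)
+#         flatTl.tracks.append(flat)
+#         return flatTl
+#
+#     # usage sketch: flatten both inputs before the single-track compare
+#     # tlA, tlB = flattenVideo(tlA), flattenVideo(tlB)
+#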
+ + +# # ====== VIDEO TRACK PROCESSING ====== +# addV, editV, sameV, deleteV = processVideo(videoTrackA, videoTrackB) + +# # ====== AUDIO TRACK PROCESSING ====== +# # check if audio tracks exist +# hasAudio = False + +# if(len(tlA.audio_tracks()) != 0): +# assert len(tlA.audio_tracks()) == 1, "File A contains more than 1 audio track" +# hasAudio = True +# if(len(tlB.audio_tracks()) != 0): +# assert len(tlB.audio_tracks()) == 1, "File B contains more than 1 audio track" +# hasAudio = True + +# # if audio track(s) present, compare audio track(s) +# if(hasAudio): +# audioTrackA = tlA.audio_tracks()[0] +# audioTrackB = tlB.audio_tracks()[0] + +# addA, editA, sameA, deleteA = processAudio(audioTrackA, audioTrackB) + +# # ====== MAKE NEW OTIO ====== +# SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) +# videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + +# # check which display mode is toggled +# if(args.display is None): +# print("no display mode specified, defaulting to inline") +# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) +# toOtio(flatTl) + +# # multi-track output +# elif(args.display.lower() == "stack"): +# print("display mode: stack") +# if(hasAudio): +# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) +# stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup, audioGroup) +# else: +# stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup) +# toOtio(stackTl) + +# # single-track output +# elif(args.display.lower() == "inline"): +# print("display mode: inline") +# if(hasAudio): +# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) +# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup, audioGroup) + +# # flat track output +# else: +# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) +# toOtio(flatTl) + +# # both multi and single track output +# elif(args.display.lower() == "full"): +# print("display mode: full") +# if(hasAudio): +# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) +# fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup, audioGroup) +# else: +# fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup) +# toOtio(fullTl) + +# else: +# print("not an accepted display mode, no otios made") \ No newline at end of file diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 0975c963b..599221e8f 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -60,12 +60,13 @@ def addMarker(newClip, color, clipData): return newClip -def makeEmptyTrack(): - return otio.schema.Track(name="=====================") +def makeEmptyTrack(trackType): + return otio.schema.Track(name="=====================", kind=trackType) def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): # make new blank track with name of kind + # print("make track of kind: ", trackKind) track = otio.schema.Track(name=trackName, kind=trackKind) # sort clips by start time in timeline @@ -102,31 +103,41 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) return track -def makeTrackB(videoGroup, trackNum, audioGroup=None): - tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE", markersOn=True) - 
tSameV = makeTrack("same", "Video", videoGroup.same) +def makeTrackB(clipGroup, trackNum, trackKind): + tAddV = makeTrack("added", trackKind, clipGroup.add, "GREEN") + tEditedV = makeTrack("edited", trackKind, clipGroup.edit, "ORANGE", markersOn=True) + tSameV = makeTrack("same", trackKind, clipGroup.same) - flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) - flat_videoB.name = str(trackNum) + "Video B" + flatB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + if trackKind == "Video": + flatB.name = "Video B" + str(trackNum) + elif trackKind == "Audio": + flatB.name = "Audio B" + str(trackNum) - return flat_videoB + flatB.kind = trackKind -def makeTrackA(videoGroup, trackNum, audioGroup=None): - tSameV = makeTrack("same", "Video", videoGroup.same) + return flatB + +def makeTrackA(clipGroup, trackNum, trackKind): + tSameV = makeTrack("same", trackKind, clipGroup.same) # grab the original pair from all the edit clipDatas actualEdited = [] - for e in videoGroup.edit: + for e in clipGroup.edit: actualEdited.append(e.pair) - tEditedV = makeTrack("edited", "Video", actualEdited, "ORANGE") + tEditedV = makeTrack("edited", trackKind, actualEdited, "ORANGE") - tDelV = makeTrack("deleted", "Video", videoGroup.delete, "PINK") + tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") - flat_videoA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) - flat_videoA.name = str(trackNum) + "Video A" + flatA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) + if trackKind == "Video": + flatA.name = "Video A" + str(trackNum) + elif trackKind == "Audio": + flatA.name = "Audio A" + str(trackNum) + + flatA.kind = trackKind - return flat_videoA + return flatA def makeTimelineOfType(tlType, trackA, trackB, videoGroup, audioGroup=None): newTl = None From b4cf7ef5bdbc5f865f8442b805b480315ccaa0c5 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Fri, 8 Aug 2025 13:53:32 -0700 Subject: [PATCH 07/30] added clipDB to hold all sorted clips and added check for clips that moved tracks Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 261 ++++++++++-------- 1 file changed, 147 insertions(+), 114 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index 54649a7b1..ed4e02eee 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -15,6 +15,7 @@ def main(tlA, tlB): hasVideo = False hasAudio = False + # check input timelines for video and audio tracks if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: hasVideo = True else: @@ -25,43 +26,27 @@ def main(tlA, tlB): else: print("no audio tracks") - outputTl = None + # process video tracks, audio tracks, or both if hasVideo and hasAudio: videoTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + # combine for t in videoTl.tracks: outputTl.tracks.append(copy.deepcopy(t)) - # combine + elif hasVideo: outputTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") elif hasAudio: outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") - - - # videoTl = processAllTracks(tlA, tlB, "video", displayMode) - # videoTl = processAllTracksAB(tlA, tlB) - - # # audio only - # audioTl = processAllTracks(tlA, tlB, "audio") - - # # both - # allTl = processAllTracks(tlA, tlB, "all") - # 
setDisplay(args.display.lower()) - - origClipCount = 0 - for t in tlA.video_tracks(): - origClipCount += len(t.find_clips()) - - for t in tlB.video_tracks(): - origClipCount += len(t.find_clips()) + # Debug + origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) print(origClipCount) print(len(outputTl.find_clips())) - # assert(len(tlA.find_clips()) + len(tlB.find_clips()) == len(videoTl.find_clips())), "Clip count doesn't match across two timelines" return outputTl @@ -212,6 +197,15 @@ def getTake(clip): take = None return take +def makeClipData(clip): + cd = ClipData(clip.name.split(" ")[0], + clip.media_reference, + clip.source_range, + clip.trimmed_range_in_parent(), + clip, + getTake(clip)) + return cd + # the consolidated version of processVideo and processAudio, meant to replace both def compareTracks(trackA, trackB): clipDatasA = [] @@ -219,22 +213,12 @@ def compareTracks(trackA, trackB): for c in trackA.find_clips(): # put clip info into ClipData - cd = ClipData(c.name.split(" ")[0], - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c, - getTake(c)) + cd = makeClipData(c) clipDatasA.append(cd) for c in trackB.find_clips(): # put clip info into ClipData - cd = ClipData(c.name.split(" ")[0], - c.media_reference, - c.source_range, - c.trimmed_range_in_parent(), - c, - getTake(c)) + cd = makeClipData(c) clipDatasB.append(cd) (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) @@ -262,10 +246,45 @@ def compareTracks(trackA, trackB): # return addV, editV, sameV, deleteV return videoGroup +def processDB(clipDB): + print("add total: ", len(clipDB["add"])) + print("edit total: ", len(clipDB["edit"])) + print("same total: ", len(clipDB["same"])) + print("delete total: ", len(clipDB["delete"])) + + # use full names to compare + # constrain "moved" to be same dep and take too, otherwise + # shotA (layout) and shotA (anim) would count as a move and not as add + for c in clipDB["add"]: + c.name = c.source.name + for c in clipDB["delete"]: + c.name = c.source.name + + # problem is this one breaks the relats with track + newAdd, newEdit, newSame, newDel = compareClips(clipDB["add"], clipDB["delete"]) + clipDB["newEdit"] = newEdit + clipDB["movedTracks"] = newSame + + # good to run make summary here + print("comparing all adds with all deletes:", len(newAdd), "e", len(newEdit), "s", len(newSame), "d", len(newDel)) + for c in newEdit: + print(c.name) + + print("sum: ", len(clipDB["add"]) + 2 * len(clipDB["edit"]) + 2 * len(clipDB["same"]) +len(clipDB["delete"])) + + # print all the adds + def printAdd(): + for track in clipDB.keys(): + print(track, clipDB[track]["add"]) + + return clipDB + + def processTracks(tracksA, tracksB, trackType): newTl = otio.schema.Timeline(name="timeline") displayA = [] displayB = [] + clipDB = {"add": [], "edit": [], "same": [], "delete": []} shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) @@ -278,6 +297,12 @@ def processTracks(tracksA, tracksB, trackType): clipGroup = compareTracks(currTrackA, currTrackB) + clipDB["add"] += clipGroup.add + clipDB["edit"] += clipGroup.edit + clipDB["same"] += clipGroup.same + clipDB["delete"] += clipGroup.delete + + # add to display otio trackNum = i + 1 newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) displayA.append(newA) @@ -295,6 +320,8 @@ def processTracks(tracksA, tracksB, trackType): c = makeOtio.addRavenColor(c, "GREEN") # newMarker = makeOtio.addMarker(c, "GREEN") # 
c.markers.append(newMarker) + cd = makeClipData(c) + clipDB["add"].append(cd) # add to top of track stack displayB.append(copy.deepcopy(newTrack)) @@ -308,6 +335,10 @@ def processTracks(tracksA, tracksB, trackType): c = makeOtio.addRavenColor(c, "PINK") # newMarker = makeOtio.addMarker(c, "PINK") # c.markers.append(newMarker) + + cd = makeClipData(c) + clipDB["delete"].append(cd) + displayA.append(copy.deepcopy(newTrack)) newTl.tracks.extend(displayA) @@ -317,91 +348,11 @@ def processTracks(tracksA, tracksB, trackType): newTl.tracks.extend(displayB) - return newTl - - -def processAllTracksAB(tlA, tlB): - # determine which track set is shorter - - hasVideo = False - hasAudio = False - - if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: - hasVideo = True - else: - print("no video tracks") - - if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: - hasAudio = True - else: - print("no audio tracks") - - - if hasVideo: - processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - - if hasAudio: - processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") - - - tracksA = tlA.video_tracks() - tracksB = tlB.video_tracks() - newTl = otio.schema.Timeline(name="timeline") - displayA = [] - displayB = [] - - - shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB - # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) - - # Process Matched Video Tracks - # index through all the video tracks of the timeline with less tracks - for i in range(0, len(shorterTlTracks)): - currTrackA = tracksA[i] - currTrackB = tracksB[i] - - videoGroup = compareTracks(currTrackA, currTrackB) - - trackNum = i + 1 - newA = makeOtio.makeTrackA(videoGroup, trackNum, "Video") - displayA.append(newA) - - newB = makeOtio.makeTrackB(videoGroup, trackNum, "Video") - displayB.append(newB) - - if shorterTlTracks == tracksA: - # tlA is shorter so tlB has added tracks - for i in range(len(shorterTlTracks), len(tracksB)): - newTrack = tracksB[i] - for c in newTrack.find_clips(): - c = makeOtio.addRavenColor(c, "GREEN") - # newMarker = makeOtio.addMarker(c, "GREEN") - # c.markers.append(newMarker) - - # add to top of track stack - displayB.append(copy.deepcopy(newTrack)) - # print("added unmatched track", len(newTl.tracks)) - else: - for i in range(len(shorterTlTracks), len(tracksA)): - # color clips - newTrack = tracksA[i] - for c in newTrack.find_clips(): - c = makeOtio.addRavenColor(c, "PINK") - # newMarker = makeOtio.addMarker(c, "PINK") - # c.markers.append(newMarker) - displayA.append(copy.deepcopy(newTrack)) + clipDB = processDB(clipDB) - newTl.tracks.extend(displayA) - - newEmpty = makeOtio.makeEmptyTrack() - newTl.tracks.append(newEmpty) - - newTl.tracks.extend(displayB) return newTl - # ================================================================================= -# TODO: organize the current terminal print-out into a document/txt file def makeSummary(tlA, tlB, videoGroup): print("===================================") print(" Overview Summary ") @@ -482,6 +433,88 @@ def makeSummary(tlA, tlB, videoGroup): python ./src/getDif.py /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio ''' +# def processAllTracksAB(tlA, tlB): +# # determine which track set is shorter + +# hasVideo = False +# hasAudio = False + +# if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: +# hasVideo = True +# else: +# print("no video tracks") + +# if len(tlA.audio_tracks()) > 0 or 
len(tlB.audio_tracks()) > 0: +# hasAudio = True +# else: +# print("no audio tracks") + + +# if hasVideo: +# processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + +# if hasAudio: +# processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + + +# tracksA = tlA.video_tracks() +# tracksB = tlB.video_tracks() +# newTl = otio.schema.Timeline(name="timeline") +# displayA = [] +# displayB = [] + + +# shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB +# # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + +# # Process Matched Video Tracks +# # index through all the video tracks of the timeline with less tracks +# for i in range(0, len(shorterTlTracks)): +# currTrackA = tracksA[i] +# currTrackB = tracksB[i] + +# videoGroup = compareTracks(currTrackA, currTrackB) + +# trackNum = i + 1 +# newA = makeOtio.makeTrackA(videoGroup, trackNum, "Video") +# displayA.append(newA) + +# newB = makeOtio.makeTrackB(videoGroup, trackNum, "Video") +# displayB.append(newB) + +# if shorterTlTracks == tracksA: +# # tlA is shorter so tlB has added tracks +# for i in range(len(shorterTlTracks), len(tracksB)): +# newTrack = tracksB[i] +# for c in newTrack.find_clips(): +# c = makeOtio.addRavenColor(c, "GREEN") +# # newMarker = makeOtio.addMarker(c, "GREEN") +# # c.markers.append(newMarker) + +# # add to top of track stack +# displayB.append(copy.deepcopy(newTrack)) +# # print("added unmatched track", len(newTl.tracks)) +# else: +# for i in range(len(shorterTlTracks), len(tracksA)): +# # color clips +# newTrack = tracksA[i] +# for c in newTrack.find_clips(): +# c = makeOtio.addRavenColor(c, "PINK") +# # newMarker = makeOtio.addMarker(c, "PINK") +# # c.markers.append(newMarker) +# displayA.append(copy.deepcopy(newTrack)) + +# newTl.tracks.extend(displayA) + +# newEmpty = makeOtio.makeEmptyTrack() +# newTl.tracks.append(newEmpty) + +# newTl.tracks.extend(displayB) + +# return newTl +# # ================================================================================= + + # def processAllTracks(tlA, tlB, trackType, displayMode): # # determine which track set is shorter # assert(trackType is not None), "Missing type of track in function call" From 8978296d5dfabf35d95fa80e76bda7b34e6d182b Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Mon, 11 Aug 2025 10:17:36 -0700 Subject: [PATCH 08/30] sort out moved clips in db and make new moved track with new color in timelineB display Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 4 +- .../opentimelineio/console/otiodiff/getDif.py | 229 ++++++++++++------ .../console/otiodiff/makeOtio.py | 16 +- 3 files changed, 169 insertions(+), 80 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index b2826e850..fa92220de 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -9,12 +9,14 @@ class ClipData: note = "" source = otio.schema.Clip() pair = None + track_num = None - def __init__(self, name, media_ref, source_range, timeline_range, source, take=None, note=None): + def __init__(self, name, media_ref, source_range, timeline_range, track_num, source, take=None, note=None): self.name = name self.media_ref = media_ref self.source_range = source_range self.timeline_range = timeline_range + self.track_num = track_num self.source = source self.take = take self.note = note diff --git 
a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index ed4e02eee..393025919 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -167,6 +167,7 @@ def compareClips(clipDatasA, clipDatasB): else: isSame = cB.checkSame(namesA[cB.name]) if(isSame): + cB.pair = namesA[cB.name] unchanged.append(cB) else: isEdited = cB.checkEdited(namesA[cB.name]) @@ -197,28 +198,29 @@ def getTake(clip): take = None return take -def makeClipData(clip): +def makeClipData(clip, trackNum): cd = ClipData(clip.name.split(" ")[0], clip.media_reference, clip.source_range, clip.trimmed_range_in_parent(), + trackNum, clip, getTake(clip)) return cd # the consolidated version of processVideo and processAudio, meant to replace both -def compareTracks(trackA, trackB): +def compareTracks(trackA, trackB, trackNum): clipDatasA = [] clipDatasB = [] for c in trackA.find_clips(): # put clip info into ClipData - cd = makeClipData(c) + cd = makeClipData(c, trackNum) clipDatasA.append(cd) for c in trackB.find_clips(): # put clip info into ClipData - cd = makeClipData(c) + cd = makeClipData(c, trackNum) clipDatasB.append(cd) (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) @@ -243,48 +245,116 @@ def compareTracks(trackA, trackB): makeSummary(trackA, trackB, videoGroup) - # return addV, editV, sameV, deleteV - return videoGroup + return addV, editV, sameV, deleteV + # return videoGroup -def processDB(clipDB): - print("add total: ", len(clipDB["add"])) - print("edit total: ", len(clipDB["edit"])) - print("same total: ", len(clipDB["same"])) - print("delete total: ", len(clipDB["delete"])) - - # use full names to compare - # constrain "moved" to be same dep and take too, otherwise - # shotA (layout) and shotA (anim) would count as a move and not as add - for c in clipDB["add"]: +def checkMoved(allDel, allAdd): + # ones found as same = moved + # ones found as edited = moved and edited + + # wanted to compare full names to account for dif dep/take + for c in allDel: c.name = c.source.name - for c in clipDB["delete"]: + for c in allAdd: c.name = c.source.name - # problem is this one breaks the relats with track - newAdd, newEdit, newSame, newDel = compareClips(clipDB["add"], clipDB["delete"]) - clipDB["newEdit"] = newEdit - clipDB["movedTracks"] = newSame + newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd) + for i in moved: + i.note = "Moved from track: " + str(i.pair.track_num) + # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num) + + for i in moveEdit: + i.note += " and moved from track " + str(i.pair.track_num) + # print(i.name, i.note) - # good to run make summary here - print("comparing all adds with all deletes:", len(newAdd), "e", len(newEdit), "s", len(newSame), "d", len(newDel)) - for c in newEdit: - print(c.name) + return newAdd, moveEdit, moved, newDel + + +def processDB(clipDB): + allAdd = [] + allEdit = [] + allSame = [] + allDel = [] + + for track in clipDB.keys(): + clipGroup = clipDB[track] + add = clipDB[track] + # print(clipDB[track]["add"]) + allAdd.extend(clipGroup["add"]) + allDel.extend(clipGroup["delete"]) + allSame.extend(clipGroup["same"]) + allEdit.extend(clipGroup["edit"]) + + clipGroup["moved"] = [] + + add, moveEdit, moved, delete = checkMoved(allDel, allAdd) + + # currently has redundancy where moved clips aren't deleted from add + for cd in moved: + # 
clipDB[cd.track_num]["add"].remove(cd) + # clipDB[cd.track_num]["delete"].remove(cd) + clipDB[cd.track_num]["moved"].append(cd) + # clipDB[cd.pair.track_num]["moved"].append(cd.pair) + + return clipDB + +def newMakeOtio(clipDB, trackType): + displayA = [] + displayB = [] + for trackNum in clipDB.keys(): + SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete', 'move']) + clipGroup = SortedClipDatas(clipDB[trackNum]["add"], clipDB[trackNum]["edit"], clipDB[trackNum]["same"], clipDB[trackNum]["delete"], clipDB[trackNum]["moved"]) + + newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) + displayA.append(newA) + + newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) + displayB.append(newB) + + return displayA, displayB + + # add note moved from track# and moved to track# - print("sum: ", len(clipDB["add"]) + 2 * len(clipDB["edit"]) + 2 * len(clipDB["same"]) +len(clipDB["delete"])) + + # print("add total: ", len(clipDB["add"])) + # print("edit total: ", len(clipDB["edit"])) + # print("same total: ", len(clipDB["same"])) + # print("delete total: ", len(clipDB["delete"])) + + # # use full names to compare + # # constrain "moved" to be same dep and take too, otherwise + # # shotA (layout) and shotA (anim) would count as a move and not as add + # for c in clipDB["add"]: + # c.name = c.source.name + # for c in clipDB["delete"]: + # c.name = c.source.name + + # # problem is this one breaks the relats with track + # newAdd, newEdit, newSame, newDel = compareClips(clipDB["add"], clipDB["delete"]) + # clipDB["newEdit"] = newEdit + # clipDB["movedTracks"] = newSame + + # # good to run make summary here + # print("comparing all adds with all deletes:", len(newAdd), "e", len(newEdit), "s", len(newSame), "d", len(newDel)) + # for c in newEdit: + # print(c.name) + + # print("sum: ", len(clipDB["add"]) + 2 * len(clipDB["edit"]) + 2 * len(clipDB["same"]) +len(clipDB["delete"])) # print all the adds - def printAdd(): - for track in clipDB.keys(): - print(track, clipDB[track]["add"]) + # def printAdd(): + # for track in clipDB.keys(): + # print(track, clipDB[track]["add"]) - return clipDB + # return clipDB def processTracks(tracksA, tracksB, trackType): newTl = otio.schema.Timeline(name="timeline") displayA = [] displayB = [] - clipDB = {"add": [], "edit": [], "same": [], "delete": []} + clipDB = {} + # clipDB = {"add": [], "edit": [], "same": [], "delete": []} shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) @@ -294,52 +364,66 @@ def processTracks(tracksA, tracksB, trackType): for i in range(0, len(shorterTlTracks)): currTrackA = tracksA[i] currTrackB = tracksB[i] + trackNum = i + 1 - clipGroup = compareTracks(currTrackA, currTrackB) - - clipDB["add"] += clipGroup.add - clipDB["edit"] += clipGroup.edit - clipDB["same"] += clipGroup.same - clipDB["delete"] += clipGroup.delete - # add to display otio - trackNum = i + 1 - newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) - displayA.append(newA) + # clipGroup = compareTracks(currTrackA, currTrackB, trackNum) + add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) + # print(add) - newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) - displayB.append(newB) + # newDict = {"add": add, "edit": edit, "same": same, "delete": delete} + # clipDB[trackNum] = newDict + clipDB[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} + print("here", clipDB[trackNum]["add"][0].name) - # Process Unmatched 
Tracks - if shorterTlTracks == tracksA: - # tlA is shorter so tlB has added tracks - for i in range(len(shorterTlTracks), len(tracksB)): - newTrack = tracksB[i] - newTrack.name = trackType + " B" + str(i + 1) - for c in newTrack.find_clips(): - c = makeOtio.addRavenColor(c, "GREEN") - # newMarker = makeOtio.addMarker(c, "GREEN") - # c.markers.append(newMarker) - cd = makeClipData(c) - clipDB["add"].append(cd) - - # add to top of track stack - displayB.append(copy.deepcopy(newTrack)) - # print("added unmatched track", len(newTl.tracks)) - else: - for i in range(len(shorterTlTracks), len(tracksA)): - # color clips - newTrack = tracksA[i] - newTrack.name = trackType + " A" + str(i + 1) - for c in newTrack.find_clips(): - c = makeOtio.addRavenColor(c, "PINK") - # newMarker = makeOtio.addMarker(c, "PINK") - # c.markers.append(newMarker) + # clipDB["add"] += clipGroup.add + # clipDB["edit"] += clipGroup.edit + # clipDB["same"] += clipGroup.same + # clipDB["delete"] += clipGroup.delete - cd = makeClipData(c) - clipDB["delete"].append(cd) + # add to display otio + # newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) + # displayA.append(newA) + + # newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) + # displayB.append(newB) + + # # Process Unmatched Tracks + # if shorterTlTracks == tracksA: + # # tlA is shorter so tlB has added tracks + # for i in range(len(shorterTlTracks), len(tracksB)): + # newTrack = tracksB[i] + # trackNum = i + 1 + # newTrack.name = trackType + " B" + str(trackNum) + # for c in newTrack.find_clips(): + # c = makeOtio.addRavenColor(c, "GREEN") + # # newMarker = makeOtio.addMarker(c, "GREEN") + # # c.markers.append(newMarker) + + # # cd = makeClipData(c, trackNum) + # # clipDB[trackNum].append(cd) + + # # add to top of track stack + # displayB.append(copy.deepcopy(newTrack)) + # # print("added unmatched track", len(newTl.tracks)) + # else: + # for i in range(len(shorterTlTracks), len(tracksA)): + # # color clips + # newTrack = tracksA[i] + # trackNum = i + 1 + # newTrack.name = trackType + " A" + str(trackNum) + # for c in newTrack.find_clips(): + # c = makeOtio.addRavenColor(c, "PINK") + # # newMarker = makeOtio.addMarker(c, "PINK") + # # c.markers.append(newMarker) + + # # cd = makeClipData(c, trackNum) + # # clipDB[trackNum].append(cd) + + # displayA.append(copy.deepcopy(newTrack)) - displayA.append(copy.deepcopy(newTrack)) + clipDB = processDB(clipDB) + displayA, displayB = newMakeOtio(clipDB, trackType) newTl.tracks.extend(displayA) @@ -348,9 +432,6 @@ def processTracks(tracksA, tracksB, trackType): newTl.tracks.extend(displayB) - clipDB = processDB(clipDB) - - return newTl def makeSummary(tlA, tlB, videoGroup): diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 599221e8f..7ff0dc44c 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -47,7 +47,7 @@ def addMarker(newClip, color, clipData): newMarker.color = color if isinstance(clipData, ClipData) and clipData.note is not None: - print("edit note added") + # print("edit note added") newMarker.name = clipData.note if(color == "GREEN"): @@ -107,8 +107,9 @@ def makeTrackB(clipGroup, trackNum, trackKind): tAddV = makeTrack("added", trackKind, clipGroup.add, "GREEN") tEditedV = makeTrack("edited", trackKind, clipGroup.edit, "ORANGE", markersOn=True) tSameV = makeTrack("same", trackKind, clipGroup.same) + tMovedV = 
makeTrack("moved", trackKind, clipGroup.move, "PURPLE", markersOn=True) - flatB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) + flatB = otio.core.flatten_stack([tSameV, tEditedV, tAddV, tMovedV]) if trackKind == "Video": flatB.name = "Video B" + str(trackNum) elif trackKind == "Audio": @@ -122,10 +123,15 @@ def makeTrackA(clipGroup, trackNum, trackKind): tSameV = makeTrack("same", trackKind, clipGroup.same) # grab the original pair from all the edit clipDatas - actualEdited = [] + prevEdited = [] + prevMoved = [] for e in clipGroup.edit: - actualEdited.append(e.pair) - tEditedV = makeTrack("edited", trackKind, actualEdited, "ORANGE") + prevEdited.append(e.pair) + tEditedV = makeTrack("edited", trackKind, prevEdited, "ORANGE") + + for m in clipGroup.move: + prevMoved.append(m.pair) + tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", markersOn=True) tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") From cf3288f7f1be4122801df4679dbf706b9985c51d Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Mon, 11 Aug 2025 11:11:05 -0700 Subject: [PATCH 09/30] removed move classification if move within same track Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 59 +++++++++++++++---- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index 393025919..ef215a743 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -240,10 +240,10 @@ def compareTracks(trackA, trackB, trackNum): sameV.extend(sameCloneV) deleteV.extend(deleteCloneV) - SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) - videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) + # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) + # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) - makeSummary(trackA, trackB, videoGroup) + # makeSummary(trackA, trackB, videoGroup) return addV, editV, sameV, deleteV # return videoGroup @@ -259,8 +259,9 @@ def checkMoved(allDel, allAdd): c.name = c.source.name newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd) - for i in moved: - i.note = "Moved from track: " + str(i.pair.track_num) + moved = [clip for clip in moved if clip.track_num != clip.pair.track_num] + for clip in moved: + clip.note = "Moved from track: " + str(clip.pair.track_num) # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num) for i in moveEdit: @@ -278,14 +279,13 @@ def processDB(clipDB): for track in clipDB.keys(): clipGroup = clipDB[track] - add = clipDB[track] # print(clipDB[track]["add"]) allAdd.extend(clipGroup["add"]) allDel.extend(clipGroup["delete"]) allSame.extend(clipGroup["same"]) allEdit.extend(clipGroup["edit"]) - clipGroup["moved"] = [] + clipGroup["move"] = [] add, moveEdit, moved, delete = checkMoved(allDel, allAdd) @@ -293,7 +293,7 @@ def processDB(clipDB): for cd in moved: # clipDB[cd.track_num]["add"].remove(cd) # clipDB[cd.track_num]["delete"].remove(cd) - clipDB[cd.track_num]["moved"].append(cd) + clipDB[cd.track_num]["move"].append(cd) # clipDB[cd.pair.track_num]["moved"].append(cd.pair) return clipDB @@ -303,7 +303,7 @@ def newMakeOtio(clipDB, trackType): displayB = [] for trackNum in clipDB.keys(): SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete', 'move']) - clipGroup = 
SortedClipDatas(clipDB[trackNum]["add"], clipDB[trackNum]["edit"], clipDB[trackNum]["same"], clipDB[trackNum]["delete"], clipDB[trackNum]["moved"]) + clipGroup = SortedClipDatas(clipDB[trackNum]["add"], clipDB[trackNum]["edit"], clipDB[trackNum]["same"], clipDB[trackNum]["delete"], clipDB[trackNum]["move"]) newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) displayA.append(newA) @@ -374,7 +374,7 @@ def processTracks(tracksA, tracksB, trackType): # newDict = {"add": add, "edit": edit, "same": same, "delete": delete} # clipDB[trackNum] = newDict clipDB[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} - print("here", clipDB[trackNum]["add"][0].name) + # print("here", clipDB[trackNum]["add"][0].name) # clipDB["add"] += clipGroup.add # clipDB["edit"] += clipGroup.edit @@ -423,6 +423,8 @@ def processTracks(tracksA, tracksB, trackType): # displayA.append(copy.deepcopy(newTrack)) clipDB = processDB(clipDB) + + newMakeSummary(clipDB, "perTrack") displayA, displayB = newMakeOtio(clipDB, trackType) newTl.tracks.extend(displayA) @@ -434,6 +436,43 @@ def processTracks(tracksA, tracksB, trackType): return newTl +def newMakeSummary(clipDB, mode): + print("===================================") + print(" Overview Summary ") + print("===================================") + allAdd = [] + allEdit = [] + allSame = [] + allDel = [] + allMove = [] + + if mode == "summary": + for track in clipDB.keys(): + clipGroup = clipDB[track] + + allAdd.extend(clipGroup["add"]) + allDel.extend(clipGroup["delete"]) + allSame.extend(clipGroup["same"]) + allEdit.extend(clipGroup["edit"]) + allMove.extend(clipGroup["move"]) + + print("total added:", len(allAdd)) + print("total edited:", len(allEdit)) + print("total moved:", len(allMove)) + print("total deleted:", len(allDel)) + + if mode == "perTrack": + # print by track + for track in clipDB.keys(): + clipGroup = clipDB[track] + print("================== Track", track, "==================") + for cat in clipGroup.keys(): + print("") + print(cat.upper(), ":", len(clipGroup[cat])) + for i in clipGroup[cat]: + print(i.name) + print("") + def makeSummary(tlA, tlB, videoGroup): print("===================================") print(" Overview Summary ") From db1022b5a88c576a519fc356088d60367a23c9d8 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Mon, 11 Aug 2025 12:04:15 -0700 Subject: [PATCH 10/30] added handling of unmatched extra tracks back in Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 60 +++++++++++-------- 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index ef215a743..19e1d1317 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -280,10 +280,10 @@ def processDB(clipDB): for track in clipDB.keys(): clipGroup = clipDB[track] # print(clipDB[track]["add"]) - allAdd.extend(clipGroup["add"]) - allDel.extend(clipGroup["delete"]) - allSame.extend(clipGroup["same"]) - allEdit.extend(clipGroup["edit"]) + allAdd.extend(clipGroup["add"]) if "add" in clipGroup.keys() else print("no add ") + allDel.extend(clipGroup["delete"]) if "delete" in clipGroup.keys() else print("no del") + allSame.extend(clipGroup["same"]) if "same" in clipGroup.keys() else print("no same") + allEdit.extend(clipGroup["edit"]) if "edit" in clipGroup.keys() else print("no edit") clipGroup["move"] = [] @@ -389,42 
+389,50 @@ def processTracks(tracksA, tracksB, trackType): # displayB.append(newB) # # Process Unmatched Tracks - # if shorterTlTracks == tracksA: + if shorterTlTracks == tracksA: # # tlA is shorter so tlB has added tracks - # for i in range(len(shorterTlTracks), len(tracksB)): - # newTrack = tracksB[i] - # trackNum = i + 1 - # newTrack.name = trackType + " B" + str(trackNum) - # for c in newTrack.find_clips(): + for i in range(len(shorterTlTracks), len(tracksB)): + newTrack = tracksB[i] + trackNum = i + 1 + newTrack.name = trackType + " B" + str(trackNum) + + added = [] + for c in newTrack.find_clips(): # c = makeOtio.addRavenColor(c, "GREEN") # # newMarker = makeOtio.addMarker(c, "GREEN") # # c.markers.append(newMarker) - # # cd = makeClipData(c, trackNum) - # # clipDB[trackNum].append(cd) + cd = makeClipData(c, trackNum) + added.append(cd) # # add to top of track stack # displayB.append(copy.deepcopy(newTrack)) # # print("added unmatched track", len(newTl.tracks)) - # else: - # for i in range(len(shorterTlTracks), len(tracksA)): + + clipDB[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} + else: + for i in range(len(shorterTlTracks), len(tracksA)): # # color clips - # newTrack = tracksA[i] - # trackNum = i + 1 - # newTrack.name = trackType + " A" + str(trackNum) - # for c in newTrack.find_clips(): + newTrack = tracksA[i] + trackNum = i + 1 + newTrack.name = trackType + " A" + str(trackNum) + + deleted = [] + for c in newTrack.find_clips(): # c = makeOtio.addRavenColor(c, "PINK") # # newMarker = makeOtio.addMarker(c, "PINK") # # c.markers.append(newMarker) - # # cd = makeClipData(c, trackNum) - # # clipDB[trackNum].append(cd) + cd = makeClipData(c, trackNum) + deleted.append(cd) # displayA.append(copy.deepcopy(newTrack)) + clipDB[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} + clipDB = processDB(clipDB) - newMakeSummary(clipDB, "perTrack") + newMakeSummary(clipDB, "summary") displayA, displayB = newMakeOtio(clipDB, trackType) newTl.tracks.extend(displayA) @@ -450,11 +458,11 @@ def newMakeSummary(clipDB, mode): for track in clipDB.keys(): clipGroup = clipDB[track] - allAdd.extend(clipGroup["add"]) - allDel.extend(clipGroup["delete"]) - allSame.extend(clipGroup["same"]) - allEdit.extend(clipGroup["edit"]) - allMove.extend(clipGroup["move"]) + allAdd.extend(clipGroup["add"]) if "add" in clipGroup.keys() else print("no add") + allDel.extend(clipGroup["delete"]) if "delete" in clipGroup.keys() else print("no del") + allSame.extend(clipGroup["same"]) if "same" in clipGroup.keys() else print("no same") + allEdit.extend(clipGroup["edit"]) if "edit" in clipGroup.keys() else print("no edit") + allMove.extend(clipGroup["move"]) if "move" in clipGroup.keys() else print("no move") print("total added:", len(allAdd)) print("total edited:", len(allEdit)) From 9a2831d61feec52672f81fca2171038c9474ce0f Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Tue, 12 Aug 2025 09:48:36 -0700 Subject: [PATCH 11/30] reorder audio timeline display order to have B on top Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 4 +-- .../opentimelineio/console/otiodiff/getDif.py | 22 ++++++++++----- .../console/otiodiff/makeOtio.py | 27 +++++++++++++++---- 3 files changed, 40 insertions(+), 13 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index fa92220de..e62f37172 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ 
b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -78,8 +78,8 @@ def checkEdited(self, cA): isEdited = False # Note: assumption that source range and timeline range duration always equal - assert(self.source_range.duration.value == self.timeline_range.duration.value), "clip source range and timeline range durations don't match" - assert(cA.source_range.duration.value == cA.timeline_range.duration.value), "clip source range and timeline range durations don't match" + # assert(self.source_range.duration.value == self.timeline_range.duration.value), "clip source range and timeline range durations don't match" + # assert(cA.source_range.duration.value == cA.timeline_range.duration.value), "clip source range and timeline range durations don't match" selfDur = self.source_range.duration cADur = cA.source_range.duration diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index 19e1d1317..bf1436d8a 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -435,18 +435,28 @@ def processTracks(tracksA, tracksB, trackType): newMakeSummary(clipDB, "summary") displayA, displayB = newMakeOtio(clipDB, trackType) - newTl.tracks.extend(displayA) + if trackType == "Video": + newTl.tracks.extend(displayA) - newEmpty = makeOtio.makeEmptyTrack(trackType) - newTl.tracks.append(newEmpty) - - newTl.tracks.extend(displayB) + newEmpty = makeOtio.makeEmptyTrack(trackType) + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayB) + elif trackType == "Audio": + newTl.tracks.extend(displayB) + + newEmpty = makeOtio.makeEmptyTrack(trackType) + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayA) + + makeOtio.colorMovedA(newTl, clipDB) return newTl def newMakeSummary(clipDB, mode): print("===================================") - print(" Overview Summary ") + print(" Overview Summary ") print("===================================") allAdd = [] allEdit = [] diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 7ff0dc44c..913877e71 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -119,6 +119,27 @@ def makeTrackB(clipGroup, trackNum, trackKind): return flatB +def colorMovedA(tl, clipDB): + # maybe make an extract all add/edit/move, etc from clipDB + movedClips = [] + for track in clipDB.keys(): + movedClips.extend(clipDB[track]["move"]) + + for m in movedClips: + movedA = m.pair + track = movedA.track_num + + # find clip in new track that was created + currentTrack = tl.tracks[track] + clips = currentTrack.find_clips() + if movedA.source in clips: + print("found corresponding clip") + # clipToColor = clips.index(movedA.source) + + # print(clipToColor.name) + + # tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", markersOn=True) + def makeTrackA(clipGroup, trackNum, trackKind): tSameV = makeTrack("same", trackKind, clipGroup.same) # grab the original pair from all the edit clipDatas @@ -127,11 +148,7 @@ def makeTrackA(clipGroup, trackNum, trackKind): prevMoved = [] for e in clipGroup.edit: prevEdited.append(e.pair) - tEditedV = makeTrack("edited", trackKind, prevEdited, "ORANGE") - - for m in clipGroup.move: - prevMoved.append(m.pair) - tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", 
markersOn=True) + tEditedV = makeTrack("edited", trackKind, prevEdited, "ORANGE") tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") From 8cc3ad84ca5f8acf675461710608fc28e214130e Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Tue, 12 Aug 2025 11:00:40 -0700 Subject: [PATCH 12/30] added overall summary stats and did some code cleanup Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 545 +++--------------- .../opentimelineio/console/otiotool.py | 3 +- 2 files changed, 70 insertions(+), 478 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index bf1436d8a..6ef2bc347 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -26,27 +26,38 @@ def main(tlA, tlB): else: print("no audio tracks") - outputTl = None + makeTlSummary(tlA, tlB) + outputTl = None # process video tracks, audio tracks, or both if hasVideo and hasAudio: - videoTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + videoDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + audioDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + + makeSummary(videoDB, "Video", "perTrack") + makeSummary(audioDB, "Audio", "summary") + + videoTl = makeNewOtio(videoDB, "Video") + outputTl = makeNewOtio(audioDB, "Audio") # combine for t in videoTl.tracks: outputTl.tracks.append(copy.deepcopy(t)) elif hasVideo: - outputTl = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + videoDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + makeSummary(videoDB, "Video", "summary") + outputTl = makeNewOtio(videoDB, "Video") elif hasAudio: - outputTl = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + audioDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + makeSummary(audioDB, "Audio", "summary") + outputTl = makeNewOtio(audioDB, "Audio") # Debug - origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) + # origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) - print(origClipCount) - print(len(outputTl.find_clips())) + # print(origClipCount) + # print(len(outputTl.find_clips())) return outputTl @@ -243,8 +254,6 @@ def compareTracks(trackA, trackB, trackNum): # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) - # makeSummary(trackA, trackB, videoGroup) - return addV, editV, sameV, deleteV # return videoGroup @@ -253,6 +262,7 @@ def checkMoved(allDel, allAdd): # ones found as edited = moved and edited # wanted to compare full names to account for dif dep/take + # otherwise shotA (layout) and shotA (anim) would count as a move and not as add for c in allDel: c.name = c.source.name for c in allAdd: @@ -271,7 +281,7 @@ def checkMoved(allDel, allAdd): return newAdd, moveEdit, moved, newDel -def processDB(clipDB): +def sortMoved(clipDB): allAdd = [] allEdit = [] allSame = [] @@ -289,16 +299,17 @@ def processDB(clipDB): add, moveEdit, moved, delete = checkMoved(allDel, allAdd) - # currently has redundancy where moved clips aren't deleted from add + # currently moved clips are still marked as delete in timelineA for cd in moved: - # clipDB[cd.track_num]["add"].remove(cd) - # clipDB[cd.track_num]["delete"].remove(cd) + 
clipDB[cd.track_num]["add"].remove(cd) clipDB[cd.track_num]["move"].append(cd) + # clipDB[cd.track_num]["delete"].remove(cd) # clipDB[cd.pair.track_num]["moved"].append(cd.pair) return clipDB -def newMakeOtio(clipDB, trackType): +def makeNewOtio(clipDB, trackType): + newTl = otio.schema.Timeline(name="diffed") displayA = [] displayB = [] for trackNum in clipDB.keys(): @@ -311,46 +322,26 @@ def newMakeOtio(clipDB, trackType): newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) displayB.append(newB) - return displayA, displayB - - # add note moved from track# and moved to track# - - - # print("add total: ", len(clipDB["add"])) - # print("edit total: ", len(clipDB["edit"])) - # print("same total: ", len(clipDB["same"])) - # print("delete total: ", len(clipDB["delete"])) - - # # use full names to compare - # # constrain "moved" to be same dep and take too, otherwise - # # shotA (layout) and shotA (anim) would count as a move and not as add - # for c in clipDB["add"]: - # c.name = c.source.name - # for c in clipDB["delete"]: - # c.name = c.source.name - - # # problem is this one breaks the relats with track - # newAdd, newEdit, newSame, newDel = compareClips(clipDB["add"], clipDB["delete"]) - # clipDB["newEdit"] = newEdit - # clipDB["movedTracks"] = newSame - - # # good to run make summary here - # print("comparing all adds with all deletes:", len(newAdd), "e", len(newEdit), "s", len(newSame), "d", len(newDel)) - # for c in newEdit: - # print(c.name) + if trackType == "Video": + newTl.tracks.extend(displayA) - # print("sum: ", len(clipDB["add"]) + 2 * len(clipDB["edit"]) + 2 * len(clipDB["same"]) +len(clipDB["delete"])) + newEmpty = makeOtio.makeEmptyTrack(trackType) + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayB) + elif trackType == "Audio": + newTl.tracks.extend(displayB) - # print all the adds - # def printAdd(): - # for track in clipDB.keys(): - # print(track, clipDB[track]["add"]) + newEmpty = makeOtio.makeEmptyTrack(trackType) + newTl.tracks.append(newEmpty) + + newTl.tracks.extend(displayA) - # return clipDB + makeOtio.colorMovedA(newTl, clipDB) + return newTl def processTracks(tracksA, tracksB, trackType): - newTl = otio.schema.Timeline(name="timeline") displayA = [] displayB = [] clipDB = {} @@ -366,7 +357,6 @@ def processTracks(tracksA, tracksB, trackType): currTrackB = tracksB[i] trackNum = i + 1 - # clipGroup = compareTracks(currTrackA, currTrackB, trackNum) add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) # print(add) @@ -376,88 +366,46 @@ def processTracks(tracksA, tracksB, trackType): clipDB[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} # print("here", clipDB[trackNum]["add"][0].name) - # clipDB["add"] += clipGroup.add - # clipDB["edit"] += clipGroup.edit - # clipDB["same"] += clipGroup.same - # clipDB["delete"] += clipGroup.delete - - # add to display otio - # newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) - # displayA.append(newA) - - # newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) - # displayB.append(newB) - - # # Process Unmatched Tracks + # Process Unmatched Tracks if shorterTlTracks == tracksA: - # # tlA is shorter so tlB has added tracks + # tlA is shorter so tlB has added tracks for i in range(len(shorterTlTracks), len(tracksB)): newTrack = tracksB[i] trackNum = i + 1 - newTrack.name = trackType + " B" + str(trackNum) + # newTrack.name = trackType + " B" + str(trackNum) added = [] for c in newTrack.find_clips(): - # c = makeOtio.addRavenColor(c, "GREEN") - # # newMarker = 
makeOtio.addMarker(c, "GREEN") - # # c.markers.append(newMarker) - cd = makeClipData(c, trackNum) added.append(cd) - # # add to top of track stack - # displayB.append(copy.deepcopy(newTrack)) - # # print("added unmatched track", len(newTl.tracks)) - clipDB[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} + else: for i in range(len(shorterTlTracks), len(tracksA)): - # # color clips newTrack = tracksA[i] trackNum = i + 1 - newTrack.name = trackType + " A" + str(trackNum) + # newTrack.name = trackType + " A" + str(trackNum) deleted = [] for c in newTrack.find_clips(): - # c = makeOtio.addRavenColor(c, "PINK") - # # newMarker = makeOtio.addMarker(c, "PINK") - # # c.markers.append(newMarker) - cd = makeClipData(c, trackNum) deleted.append(cd) - # displayA.append(copy.deepcopy(newTrack)) - clipDB[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} - clipDB = processDB(clipDB) - newMakeSummary(clipDB, "summary") - displayA, displayB = newMakeOtio(clipDB, trackType) - - if trackType == "Video": - newTl.tracks.extend(displayA) + clipDB = sortMoved(clipDB) - newEmpty = makeOtio.makeEmptyTrack(trackType) - newTl.tracks.append(newEmpty) - - newTl.tracks.extend(displayB) - elif trackType == "Audio": - newTl.tracks.extend(displayB) - - newEmpty = makeOtio.makeEmptyTrack(trackType) - newTl.tracks.append(newEmpty) - - newTl.tracks.extend(displayA) - - makeOtio.colorMovedA(newTl, clipDB) - - return newTl - -def newMakeSummary(clipDB, mode): + # displayA, displayB = makeNewOtio(clipDB, trackType) + return clipDB + +def makeSummary(clipDB, trackType, mode): + print(trackType.upper(), "CLIPS") print("===================================") print(" Overview Summary ") print("===================================") + allAdd = [] allEdit = [] allSame = [] @@ -487,56 +435,25 @@ def newMakeSummary(clipDB, mode): for cat in clipGroup.keys(): print("") print(cat.upper(), ":", len(clipGroup[cat])) - for i in clipGroup[cat]: - print(i.name) + if cat != "same": + for i in clipGroup[cat]: + print(i.name) print("") -def makeSummary(tlA, tlB, videoGroup): - print("===================================") - print(" Overview Summary ") - print("===================================") - +def makeTlSummary(tlA, tlB): + print("Comparing Timeline B:", tlB.name, "vs") + print(" Timeline A:", tlA.name) + print("") # compare overall file duration - # if(tlB.duration() > tlA.duration()): - # delta = tlB.duration().to_seconds() - tlA.duration().to_seconds() - # print(f"timeline duration increased by {delta:.2f} seconds") - # elif(tlB.duration() < tlA.duration()): - # delta = tlA.duration().to_seconds() - tlB.duration().to_seconds() - # print(f"timeline duration decreased by {delta:.2f} seconds") - # print("") - - # print("======= Cloned Video Clips =======") - # print("Otio A:") - # for k in clonesA.keys(): - # print(k, ":", len(clonesA[k])) - # print("") - # print("Otio B:") - # for k in clonesB.keys(): - # print(k, ":", len(clonesB[k])) - - - print("======= Clip Info Overview =======") - print("added: ", len(videoGroup.add)) - for c in videoGroup.add: - print(c.name) - print("=======") - - print("edited: ", len(videoGroup.edit)) - for c in videoGroup.edit: - print(c.name) - print("=======") - - print("same: ", len(videoGroup.same)) - # for c in sameV: - # print(c.name) - # if(c["label"] == "moved"): - # print(c["name"], " ", c["label"]) - print("=======") - - print("deleted: ", len(videoGroup.delete)) - for c in videoGroup.delete: - print(c.name) - print("=======") + if(tlB.duration() > tlA.duration()): + 
delta = tlB.duration().to_seconds() - tlA.duration().to_seconds() + print(f"Timeline duration increased by {delta:.2f} seconds") + elif(tlB.duration() < tlA.duration()): + delta = tlA.duration().to_seconds() - tlB.duration().to_seconds() + print(f"Timeline duration decreased by {delta:.2f} seconds") + else: + print("Timeline duration did not change") + print("") if __name__ == "__main__": main() @@ -569,328 +486,4 @@ def makeSummary(tlA, tlB, videoGroup): Test shot multitrack: python ./src/getDif.py /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio -''' - -# def processAllTracksAB(tlA, tlB): -# # determine which track set is shorter - -# hasVideo = False -# hasAudio = False - -# if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: -# hasVideo = True -# else: -# print("no video tracks") - -# if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: -# hasAudio = True -# else: -# print("no audio tracks") - - -# if hasVideo: -# processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - -# if hasAudio: -# processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") - - -# tracksA = tlA.video_tracks() -# tracksB = tlB.video_tracks() -# newTl = otio.schema.Timeline(name="timeline") -# displayA = [] -# displayB = [] - - -# shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB -# # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) - -# # Process Matched Video Tracks -# # index through all the video tracks of the timeline with less tracks -# for i in range(0, len(shorterTlTracks)): -# currTrackA = tracksA[i] -# currTrackB = tracksB[i] - -# videoGroup = compareTracks(currTrackA, currTrackB) - -# trackNum = i + 1 -# newA = makeOtio.makeTrackA(videoGroup, trackNum, "Video") -# displayA.append(newA) - -# newB = makeOtio.makeTrackB(videoGroup, trackNum, "Video") -# displayB.append(newB) - -# if shorterTlTracks == tracksA: -# # tlA is shorter so tlB has added tracks -# for i in range(len(shorterTlTracks), len(tracksB)): -# newTrack = tracksB[i] -# for c in newTrack.find_clips(): -# c = makeOtio.addRavenColor(c, "GREEN") -# # newMarker = makeOtio.addMarker(c, "GREEN") -# # c.markers.append(newMarker) - -# # add to top of track stack -# displayB.append(copy.deepcopy(newTrack)) -# # print("added unmatched track", len(newTl.tracks)) -# else: -# for i in range(len(shorterTlTracks), len(tracksA)): -# # color clips -# newTrack = tracksA[i] -# for c in newTrack.find_clips(): -# c = makeOtio.addRavenColor(c, "PINK") -# # newMarker = makeOtio.addMarker(c, "PINK") -# # c.markers.append(newMarker) -# displayA.append(copy.deepcopy(newTrack)) - -# newTl.tracks.extend(displayA) - -# newEmpty = makeOtio.makeEmptyTrack() -# newTl.tracks.append(newEmpty) - -# newTl.tracks.extend(displayB) - -# return newTl -# # ================================================================================= - - -# def processAllTracks(tlA, tlB, trackType, displayMode): -# # determine which track set is shorter -# assert(trackType is not None), "Missing type of track in function call" -# # TODO add check that timeline track length is not 0 - -# tracksA = None -# tracksB = None -# newTl = otio.schema.Timeline(name="timeline") -# tempB = otio.schema.Timeline(name="timeline") - -# if(trackType.lower() == "video"): -# tracksA = tlA.video_tracks() -# tracksB = tlB.video_tracks() -# elif(trackType.lower() == "audio"): -# tracksA = tlA.audio_tracks() -# tracksB = tlB.audio_tracks() -# 
elif(trackType.lower() == "all"): -# print("show both video and audio") - -# shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB -# print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) - -# # Process Matched Video Tracks -# # index through all the video tracks of the timeline with less tracks -# tracksOfDels = [] -# for i in range(0, len(shorterTlTracks)): -# currTrackA = tracksA[i] -# currTrackB = tracksB[i] - -# videoGroup = compareTracks(currTrackA, currTrackB) - -# # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) - -# # add processed tracks to display timeline -# getTl = None -# if displayMode is None: -# print("Warning: Display mode not specified, defaulting to inline") -# getTl = makeOtio.makeTimelineOfType("simple", currTrackA, currTrackB, videoGroup) -# else: -# # getTl = makeOtio.makeTimelineOfType(displayMode, currTrackA, currTrackB, videoGroup) - -# # split delete out -# getTl, tDelV = makeOtio.makeTimelineSplitDelete(currTrackA, currTrackB, videoGroup) -# tracksOfDels.insert(0, tDelV) - -# for t in getTl.tracks: -# newTl.tracks.append(copy.deepcopy(t)) -# print("current track stack size:", len(newTl.tracks)) - -# # Process Unmatched Video Tracks -# # mark unmatched tracks as either "added" or "deleted" and add to display timeline -# if shorterTlTracks == tracksA: -# # tlA is shorter so tlB has added tracks -# for i in range(len(shorterTlTracks), len(tracksB)): -# newTrack = tracksB[i] -# for c in newTrack.find_clips(): -# c = makeOtio.addRavenColor(c, "GREEN") - -# # add to top of track stack -# newTl.tracks.append(copy.deepcopy(newTrack)) -# print("added unmatched track", len(newTl.tracks)) -# else: -# for i in range(len(shorterTlTracks), len(tracksA)): -# # color clips -# newTrack = tracksA[i] -# for c in newTrack.find_clips(): -# c = makeOtio.addRavenColor(c, "PINK") - -# # add to bottom of track stack -# # newTl.tracks.append(copy.deepcopy(newTrack)) - -# # split delete out -# # tracksOfDels.insert(0, newTrack) - -# print("added unmatched track", len(newTl.tracks)) - -# makeOtio.makeDeletes(newTl, tracksOfDels) - -# return newTl - - -# def processVideo(videoTrackA, videoTrackB): -# clipDatasA = [] -# clipDatasB = [] - -# for c in videoTrackA.find_clips(): -# take = None -# if(len(c.name.split(" ")) > 1): -# take = c.name.split(" ")[1] -# else: -# take = None -# cd = ClipData(c.name.split(" ")[0], -# c.media_reference, -# c.source_range, -# c.trimmed_range_in_parent(), -# c, -# take) -# clipDatasA.append(cd) - -# for c in videoTrackB.find_clips(): -# take = None -# if(len(c.name.split(" ")) > 1): -# take = c.name.split(" ")[1] -# else: -# take = None -# cd = ClipData(c.name.split(" ")[0], -# c.media_reference, -# c.source_range, -# c.trimmed_range_in_parent(), -# c, -# take) -# clipDatasB.append(cd) - -# (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) - -# # compare clips and put into categories -# addV = [] -# editV = [] -# sameV = [] -# deleteV = [] - -# # compare and categorize unique clips -# addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB) - -# # compare and categorize cloned clips -# addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB) -# addV.extend(addCloneV) -# sameV.extend(sameCloneV) -# deleteV.extend(deleteCloneV) - -# return addV, editV, sameV, deleteV - -# def processAudio(audioTrackA, audioTrackB): -# addA = [] -# editA = [] -# sameA = [] -# deleteA = [] - -# audioClipDatasA = [] -# audioClipDatasB = [] - -# for c in audioTrackA.find_clips(): -# cd = 
ClipData(c.name, -# c.media_reference, -# c.source_range, -# c.trimmed_range_in_parent(), -# c) -# audioClipDatasA.append(cd) - -# for c in audioTrackB.find_clips(): -# cd = ClipData(c.name, -# c.media_reference, -# c.source_range, -# c.trimmed_range_in_parent(), -# c) -# audioClipDatasB.append(cd) - -# addA, editA, sameA, deleteA = compareClips(audioClipDatasA, audioClipDatasB) - -# return addA, editA, sameA, deleteA - - -# def processSingleTrack(tlA, tlB): -# assert len(tlA.video_tracks()) == 1, "File A contains more than 1 video track. Please flatten to a single track." -# assert len(tlB.video_tracks()) == 1, "File B contains more than 1 video track. Please flatten to a single track." - -# videoTrackA = tlA.video_tracks()[0] -# videoTrackB = tlB.video_tracks()[0] - -# # check for nested video tracks and stacks -# assert(not videoTrackA.find_children(otio._otio.Track)), "File A contains nested track(s). Please flatten to a single track." -# # assert(not videoTrackA.find_children(otio._otio.Stack)), "File A contains nested stack(s). Please flatten to a single track." -# assert(not videoTrackB.find_children(otio._otio.Track)), "File B contains nested track(s). Please flatten to a single track." -# # assert(not videoTrackB.find_children(otio._otio.Stack)), "File B contains nested stack(s). Please flatten to a single track." - - -# # ====== VIDEO TRACK PROCESSING ====== -# addV, editV, sameV, deleteV = processVideo(videoTrackA, videoTrackB) - -# # ====== AUDIO TRACK PROCESSING ====== -# # check if audio tracks exist -# hasAudio = False - -# if(len(tlA.audio_tracks()) != 0): -# assert len(tlA.audio_tracks()) == 1, "File A contains more than 1 audio track" -# hasAudio = True -# if(len(tlB.audio_tracks()) != 0): -# assert len(tlB.audio_tracks()) == 1, "File B contains more than 1 audio track" -# hasAudio = True - -# # if audio track(s) present, compare audio track(s) -# if(hasAudio): -# audioTrackA = tlA.audio_tracks()[0] -# audioTrackB = tlB.audio_tracks()[0] - -# addA, editA, sameA, deleteA = processAudio(audioTrackA, audioTrackB) - -# # ====== MAKE NEW OTIO ====== -# SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) -# videoGroup = SortedClipDatas(addV, editV, sameV, deleteV) - -# # check which display mode is toggled -# if(args.display is None): -# print("no display mode specified, defaulting to inline") -# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) -# toOtio(flatTl) - -# # multi-track output -# elif(args.display.lower() == "stack"): -# print("display mode: stack") -# if(hasAudio): -# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) -# stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup, audioGroup) -# else: -# stackTl = makeOtio.makeTimelineStack(videoTrackA, videoTrackB, videoGroup) -# toOtio(stackTl) - -# # single-track output -# elif(args.display.lower() == "inline"): -# print("display mode: inline") -# if(hasAudio): -# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) -# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup, audioGroup) - -# # flat track output -# else: -# flatTl = makeOtio.makeTimelineInline(videoTrackA, videoTrackB, videoGroup) -# toOtio(flatTl) - -# # both multi and single track output -# elif(args.display.lower() == "full"): -# print("display mode: full") -# if(hasAudio): -# audioGroup = SortedClipDatas(addA, editA, sameA, deleteA) -# fullTl = makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup, audioGroup) -# else: -# fullTl = 
makeOtio.makeTimelineFull(videoTrackA, videoTrackB, videoGroup) -# toOtio(fullTl) - -# else: -# print("not an accepted display mode, no otios made") \ No newline at end of file +''' \ No newline at end of file diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index c59ca88a3..867752193 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -119,7 +119,7 @@ def main(): # ===== NEW Phase 5.5: Diff otio files ====== if args.diff: - print("got diff from args, using tl:", timelines[0].name, timelines[1].name) + # print("comparing:", timelines[0].name, timelines[1].name) # function that serves as wrapper to call actual getDif main timelines = [diff_otio(timelines[0], timelines[1])] @@ -504,7 +504,6 @@ def read_inputs(input_paths): # ======= NEW ======= def diff_otio(tlA, tlB): - print("hello world from diff otio") return getDif.main(tlA, tlB) # =================== From 589911696d761b85d4af2ca902475ce469fbaa2b Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Tue, 12 Aug 2025 17:07:06 -0700 Subject: [PATCH 13/30] added unit test file for otiodiff and ported existing tests over Signed-off-by: Yingjie Wang --- tests/test_otiodiff.py | 377 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 377 insertions(+) create mode 100644 tests/test_otiodiff.py diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py new file mode 100644 index 000000000..3638f07c6 --- /dev/null +++ b/tests/test_otiodiff.py @@ -0,0 +1,377 @@ +import unittest +import opentimelineio as otio + +from opentimelineio.console.otiodiff.clipData import ClipData +import opentimelineio.console.otiodiff.makeOtio as makeOtio +import opentimelineio.console.otiodiff.getDif as getDiff + +from collections import namedtuple + + +class TestClipData(unittest.TestCase): + # check if the names of two ClipDatas are the same + def test_same_name(self): + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + assert(clipA.sameName(clipB)) + + def test_different_name(self): + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName2", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + assert(not clipA.sameName(clipB)) + + + def test_same_duration(self): + # check that length of clip is the same + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName2", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + assert(clipA.sameDuration(clipB)) + + def test_different_duration(self): + # check that length of clip is the different + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(duration=otio.opentime.RationalTime(100,1)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName2", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + assert(not clipA.sameDuration(clipB)) + + + def test_check_same(self): + # check that 
two exact same clips are the same + # check that two exact same clips but moved in the timeline are the same + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + + assert clipA.checkSame(clipB) + + def test_check_not_same(self): + # check that clips with different names are not the same + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName2", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + + assert not clipA.checkSame(clipB) + assert clipA.note is None + + def test_check_not_same2(self): + # check that clips with the same name but different timeline durations are not the same + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(duration=otio.opentime.RationalTime(100,1)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + + assert not clipA.checkSame(clipB) + assert clipA.note is None + + def test_check_not_same3_moved(self): + # check that an identical clip shifted along the timeline still counts as the same and is noted as "moved" + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(100,1)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + + assert clipA.checkSame(clipB) + assert clipA.note == "moved" + + def test_check_Edited(self): + # check for trim head/tail and lengthen head/tail + clipA = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(0,1)), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + clipB = ClipData("testName", + "testMR.mov", + otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(100,1)), + otio.opentime.TimeRange(), + otio.schema.Clip(), + "testTake") + + assert clipA.checkEdited(clipB) + assert clipA.note == "start time changed" + +class TestGetDif(unittest.TestCase): + def test_find_clones(self): + clipA = ClipData("clipA", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("clipB", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipC = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipCClone = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipD = ClipData("clipD", + "testMR.mov", + otio.opentime.TimeRange(), + 
otio.opentime.TimeRange(otio.opentime.RationalTime(40, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + + testClips = [clipA, clipB, clipC, clipCClone, clipD] + clones, nonClones = getDiff.findClones(testClips) + + correctClones = {clipC.name: [clipC, clipCClone]} + correctNonClones = [clipA, clipB, clipD] + + assert(clones == correctClones), "Not all cloned clips correctly identified" + assert(nonClones == correctNonClones), "Not all unique clips correctly identified" + + + def test_sort_clones_clones_in_both(self): + # SETUP + clipA = ClipData("clipA", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("clipB", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipC = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipCClone = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipD = ClipData("clipD", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipDatasA = [clipA, clipB, clipC, clipCClone] + clipDatasB = [clipB, clipC, clipCClone, clipD] + + # EXERCISE + sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) + + # VERIFY + clonesA, nonClonesA = sortedClonesA + clonesB, nonClonesB = sortedClonesB + + assert(len(clonesA) == 1), "Number of clones found in trackA doesn't match" + assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" + assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" + assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" + + def test_sort_clones_clones_in_one(self): + clipA = ClipData("clipA", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("clipB", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipC = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipCClone = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipD = ClipData("clipD", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipDatasA = [clipA, clipB, clipC, clipCClone] + clipDatasB = [clipA, clipB, clipD] + + # EXERCISE + sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) + + # VERIFY + clonesA, nonClonesA = sortedClonesA + clonesB, nonClonesB = 
sortedClonesB + + assert(len(clonesA) == 1), "Number of clones found in trackA doesn't match" + assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" + assert(len(clonesB) == 0), "Number of clones found in trackB doesn't match" + assert(len(nonClonesB) == 3), "Number of non-clones found in trackB doesn't match" + + def test_sort_clones_clones_in_one_single_in_other(self): + clipA = ClipData("clipA", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipB = ClipData("clipB", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipC = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipCClone = ClipData("clipC", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipD = ClipData("clipD", + "testMR.mov", + otio.opentime.TimeRange(), + otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), + otio.schema.Clip(), + "testTake") + clipDatasA = [clipA, clipB, clipC, clipCClone] + clipDatasB = [clipB, clipC, clipD] + + # EXERCISE + sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) + + # VERIFY + clonesA, nonClonesA = sortedClonesA + clonesB, nonClonesB = sortedClonesB + + assert(len(clonesA) == 1), "Number of clones found in trackA doesn't match" + assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" + assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" + assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" + +class TestMakeOtio(unittest.TestCase): + # Test the type parameter to makeTimelineOfType, but not the detailed results. 
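    # Editor's sketch (not part of the original patch): once makeTimelineOfType
    # is exercised for real, the commented-out assertions below could collapse
    # into one parametrized loop over the accepted display modes, reusing the
    # expected track counts they already encode, e.g.:
    #
    #   for mode, expected in [("stack", 6), ("inline", 2), ("full", 5)]:
    #       tl = makeOtio.makeTimelineOfType(mode, trackA, trackB, videoGroup)
    #       assert len(tl.tracks) == expected, mode
    #   assert makeOtio.makeTimelineOfType("bogus", trackA, trackB, videoGroup) is None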
+ def test_make_timeline_type(self): + # SETUP + trackA = otio.schema.Track() + trackB = otio.schema.Track() + pass + + # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) + # videoGroup = SortedClipDatas([], [], [], []) + + # # EXERCISE + # tlStack = makeOtio.makeTimelineOfType("stack", trackA, trackB, videoGroup) + # tlInline = makeOtio.makeTimelineOfType("inline", trackA, trackB, videoGroup) + # tlFull = makeOtio.makeTimelineOfType("full", trackA, trackB, videoGroup) + # bogus = makeOtio.makeTimelineOfType("bogus", trackA, trackB, videoGroup) + + # # VERIFY + # assert(len(tlStack.tracks) == 6), "Number of tracks for stack display mode not matched" + # assert(len(tlInline.tracks) == 2), "Number of tracks for inline display mode not matched" + # assert(len(tlFull.tracks) == 5), "Number of tracks for full display mode not matched" + # assert(bogus is None), "Should have been invalid result" + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From ad82f2514f8c324ced70e43665fde1a1b4374163 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Wed, 13 Aug 2025 10:21:42 -0700 Subject: [PATCH 14/30] code organization and adding comments Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDif.py | 52 +++++++++++-------- .../console/otiodiff/makeOtio.py | 49 ++++++++--------- 2 files changed, 54 insertions(+), 47 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py index 6ef2bc347..f59f5ff0b 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py @@ -31,27 +31,27 @@ def main(tlA, tlB): outputTl = None # process video tracks, audio tracks, or both if hasVideo and hasAudio: - videoDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - audioDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") - makeSummary(videoDB, "Video", "perTrack") - makeSummary(audioDB, "Audio", "summary") + makeSummary(videoClipDB, "Video", "perTrack") + makeSummary(audioClipDB, "Audio", "summary") - videoTl = makeNewOtio(videoDB, "Video") - outputTl = makeNewOtio(audioDB, "Audio") + videoTl = makeNewOtio(videoClipDB, "Video") + outputTl = makeNewOtio(audioClipDB, "Audio") # combine for t in videoTl.tracks: outputTl.tracks.append(copy.deepcopy(t)) elif hasVideo: - videoDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - makeSummary(videoDB, "Video", "summary") - outputTl = makeNewOtio(videoDB, "Video") + videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + makeSummary(videoClipDB, "Video", "summary") + outputTl = makeNewOtio(videoClipDB, "Video") elif hasAudio: - audioDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") - makeSummary(audioDB, "Audio", "summary") - outputTl = makeNewOtio(audioDB, "Audio") + audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + makeSummary(audioClipDB, "Audio", "summary") + outputTl = makeNewOtio(audioClipDB, "Audio") # Debug # origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) @@ -280,7 +280,7 @@ def checkMoved(allDel, allAdd): return newAdd, moveEdit, moved, newDel - +# TODO: account for move edit, currently only identifies strictly moved def 
sortMoved(clipDB): allAdd = [] allEdit = [] @@ -290,10 +290,14 @@ def sortMoved(clipDB): for track in clipDB.keys(): clipGroup = clipDB[track] # print(clipDB[track]["add"]) - allAdd.extend(clipGroup["add"]) if "add" in clipGroup.keys() else print("no add ") - allDel.extend(clipGroup["delete"]) if "delete" in clipGroup.keys() else print("no del") - allSame.extend(clipGroup["same"]) if "same" in clipGroup.keys() else print("no same") - allEdit.extend(clipGroup["edit"]) if "edit" in clipGroup.keys() else print("no edit") + if "add" in clipGroup.keys(): + allAdd.extend(clipGroup["add"]) + if "delete" in clipGroup.keys(): + allDel.extend(clipGroup["delete"]) + if "same" in clipGroup.keys(): + allSame.extend(clipGroup["same"]) + if "edit" in clipGroup.keys(): + allEdit.extend(clipGroup["edit"]) clipGroup["move"] = [] @@ -312,7 +316,10 @@ def makeNewOtio(clipDB, trackType): newTl = otio.schema.Timeline(name="diffed") displayA = [] displayB = [] + + # make tracks A and B in output timeline for trackNum in clipDB.keys(): + # use named tuple here since clip categories won't change anymore SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete', 'move']) clipGroup = SortedClipDatas(clipDB[trackNum]["add"], clipDB[trackNum]["edit"], clipDB[trackNum]["same"], clipDB[trackNum]["delete"], clipDB[trackNum]["move"]) @@ -322,6 +329,7 @@ newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) displayB.append(newB) + # order the tracks written to the output timeline so that timeline B is on top for both video and audio if trackType == "Video": newTl.tracks.extend(displayA) @@ -341,11 +349,11 @@ return newTl -def processTracks(tracksA, tracksB, trackType): - displayA = [] - displayB = [] +def processTracks(tracksA, tracksB): clipDB = {} - # clipDB = {"add": [], "edit": [], "same": [], "delete": []} + # clipDB structure: {1: {"add": [], "edit": [], "same": [], "delete": []}} + # clipDB keys are track numbers, values are dictionaries + # per track dictionary keys are clip categories, values are lists of clips of that category shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) @@ -361,8 +369,6 @@ def processTracks(tracksA, tracksB, trackType): currTrackB = tracksB[i] trackNum = i + 1 add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) # print(add) - # newDict = {"add": add, "edit": edit, "same": same, "delete": delete} - # clipDB[trackNum] = newDict clipDB[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} # print("here", clipDB[trackNum]["add"][0].name) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 913877e71..4d11f638b 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -21,7 +21,7 @@ def toTimeline(tracks, timeline=None): def toOtio(file): otio.adapters.write_to_file(file, "display.otio") -# input is list of clipDatas +# input is list of clipDatas, sorts them by start time on the timeline def sortClips(trackClips): # sort by clip start time in timeline return sorted(trackClips, key=lambda clipData: clipData.timeline_range.start_time.value) @@ -59,11 +59,10 @@ def addMarker(newClip, color, clipData): return newClip - +# make new blank track that acts as a separator between the A and B sections def makeEmptyTrack(trackType): return 
otio.schema.Track(name="=====================", kind=trackType) - def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): # make new blank track with name of kind # print("make track of kind: ", trackKind) @@ -103,6 +102,7 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) return track +# make tracks from timeline B def makeTrackB(clipGroup, trackNum, trackKind): tAddV = makeTrack("added", trackKind, clipGroup.add, "GREEN") tEditedV = makeTrack("edited", trackKind, clipGroup.edit, "ORANGE", markersOn=True) @@ -119,27 +119,7 @@ def makeTrackB(clipGroup, trackNum, trackKind): return flatB -def colorMovedA(tl, clipDB): - # maybe make an extract all add/edit/move, etc from clipDB - movedClips = [] - for track in clipDB.keys(): - movedClips.extend(clipDB[track]["move"]) - - for m in movedClips: - movedA = m.pair - track = movedA.track_num - - # find clip in new track that was created - currentTrack = tl.tracks[track] - clips = currentTrack.find_clips() - if movedA.source in clips: - print("found corresponding clip") - # clipToColor = clips.index(movedA.source) - - # print(clipToColor.name) - - # tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", markersOn=True) - +# make tracks from timeline A def makeTrackA(clipGroup, trackNum, trackKind): tSameV = makeTrack("same", trackKind, clipGroup.same) # grab the original pair from all the edit clipDatas @@ -162,6 +142,27 @@ def makeTrackA(clipGroup, trackNum, trackKind): return flatA +# def colorMovedA(tl, clipDB): +# # maybe make an extract all add/edit/move, etc from clipDB +# movedClips = [] +# for track in clipDB.keys(): +# movedClips.extend(clipDB[track]["move"]) + +# for m in movedClips: +# movedA = m.pair +# track = movedA.track_num + +# # find clip in new track that was created +# currentTrack = tl.tracks[track] +# clips = currentTrack.find_clips() +# if movedA.source in clips: +# print("found corresponding clip") +# # clipToColor = clips.index(movedA.source) + +# # print(clipToColor.name) + +# # tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", markersOn=True) + def makeTimelineOfType(tlType, trackA, trackB, videoGroup, audioGroup=None): newTl = None From 4d6a751911f80adfae71f042c1e95d4493936824 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 14 Aug 2025 09:27:58 -0700 Subject: [PATCH 15/30] renamed otiodif.py to otiodiff.py, fixed bugs, and added todo's Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 22 +++++--- .../otiodiff/{getDif.py => getDiff.py} | 53 ++++++++++++++----- .../opentimelineio/console/otiotool.py | 16 ++++-- tests/test_otiodiff.py | 2 +- 4 files changed, 69 insertions(+), 24 deletions(-) rename src/py-opentimelineio/opentimelineio/console/otiodiff/{getDif.py => getDiff.py} (90%) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index e62f37172..2bdc246b0 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -1,7 +1,11 @@ import opentimelineio as otio +# clip comparable??? ClipInfo +# TODO: add fullname, full name = name + take, name is just name, add ex, split on space, b4 is name, after is version +# TODO: rename take to version? 
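# Editor's note (illustrative only, not part of the patch): the split these
# TODOs describe takes a full name like "shotA v2" and breaks it on the first
# space, so the base name is "shotA" and the version/take is "v2"; a name with
# no space has no version:
#
#   parts = full_name.split(" ")
#   name, version = parts[0], (parts[1] if len(parts) > 1 else None)
#
# Patch 17 below implements exactly this as ClipData.splitFullName().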
class ClipData: name = "" + # currently not used in comparisons take = None media_ref = None source_range = otio.opentime.TimeRange() @@ -11,6 +15,10 @@ class ClipData: pair = None track_num = None + # TODO: sort so above is compare, bottom is additional compare result info + +# TODO: rename source to sourceClip, rename pair to matchedClipData? +# just pass source clip and track num so construct here def __init__(self, name, media_ref, source_range, timeline_range, track_num, source, take=None, note=None): self.name = name self.media_ref = media_ref @@ -41,24 +49,24 @@ def sameName(self, cA): # note: local and source duration should always match, can assume same # compare the duration within the timeline for 2 clips def sameDuration(self, cA): - if(self.timeline_range.duration.value == cA.timeline_range.duration.value): - return True - else: - return False + return self.timeline_range.duration.value == cA.timeline_range.duration.value # compare 2 clips and see if they are the exact same, whether exact or moved along - # the timeline + # the timeline and also changes note based on edits def checkSame(self, cA): isSame = False # check names are same - if(self.sameName(cA)): + if self.sameName(cA): # check source range is same + # TODO: call trimmed range instead of source range + # TODO: make test where has null source range -> see things break, then go back and change <- low priority if(self.source_range == cA.source_range): # print(self.name, " ", self.timeline_range, " ", cA.timeline_range) # check in same place on timeline if(self.timeline_range == cA.timeline_range): isSame = True # check duration is same but not necessarily in same place on timeline + # TODO: change to else? (does the elif always run?) elif(self.sameDuration(cA)): # Note: check in relation to left and right? # know if moved in seq rather than everything shifted over because of lengthen/shorten of other clips @@ -104,6 +112,8 @@ def checkEdited(self, cA): if(selfDur.value == cADur.value): self.note = "start time changed" +# put note assignment into function, return note? +# self, other, olderClipData rather than cA # clip duration shorter elif(selfDur.value < cADur.value): self.note = "trimmed " + deltaFramesStr + " frames" diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py similarity index 90% rename from src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py rename to src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index f59f5ff0b..8c1fefc0d 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDif.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -8,10 +8,13 @@ from .clipData import ClipData from . import makeOtio -# set otio version to 0.17 -os.environ["OTIO_DEFAULT_TARGET_VERSION_FAMILY_LABEL"] = "OTIO_CORE:0.17.0" +# TODO: rename main?, rename tlA to timelineA +#currently only handles video and audio tracks + +# TODO: constant for video and audio (track.kind.video?) 
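# Editor's note: the constants this TODO asks about already exist in the OTIO
# schema as plain strings, which is what patch 16 below switches these
# "Video"/"Audio" literals over to:
#
#   otio.schema.Track.Kind.Video   # == "Video"
#   otio.schema.Track.Kind.Audio   # == "Audio"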
def main(tlA, tlB): + # TODO: put docstring here, descriptive name, most wordy descrip hasVideo = False hasAudio = False @@ -19,6 +22,7 @@ def main(tlA, tlB): if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: hasVideo = True else: + # TODO: put this in summary report print("no video tracks") if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: @@ -30,9 +34,10 @@ def main(tlA, tlB): outputTl = None # process video tracks, audio tracks, or both + # TODO: maybe table rather than db if hasVideo and hasAudio: - videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") - audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks()) + audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks()) makeSummary(videoClipDB, "Video", "perTrack") makeSummary(audioClipDB, "Audio", "summary") @@ -44,15 +49,19 @@ def main(tlA, tlB): outputTl.tracks.append(copy.deepcopy(t)) elif hasVideo: - videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks(), "Video") + videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks()) makeSummary(videoClipDB, "Video", "summary") outputTl = makeNewOtio(videoClipDB, "Video") elif hasAudio: - audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks(), "Audio") + audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks()) makeSummary(audioClipDB, "Audio", "summary") outputTl = makeNewOtio(audioClipDB, "Audio") + else: + # TODO: log no vid/aud or throw + pass + # Debug # origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) @@ -176,12 +185,13 @@ def compareClips(clipDatasA, clipDatasB): if cB.name not in namesA: added.append(cB) else: - isSame = cB.checkSame(namesA[cB.name]) + cB.pair = namesA[cB.name] + isSame = cB.checkSame(cB.pair) if(isSame): - cB.pair = namesA[cB.name] + # cB.pair = namesA[cB.name] unchanged.append(cB) else: - isEdited = cB.checkEdited(namesA[cB.name]) + isEdited = cB.checkEdited(cB.pair) if(isEdited): cB.pair = namesA[cB.name] edited.append(cB) @@ -198,6 +208,7 @@ def compareClips(clipDatasA, clipDatasB): if cA.name not in namesB: deleted.append(cA) +# TODO: some can be sets instead of lists return added, edited, unchanged, deleted # clip is an otio Clip @@ -209,6 +220,8 @@ def getTake(clip): take = None return take +# TODO: change name, make comparable rep? clip comparator? +# TODO: learn abt magic functions ex __eq__ def makeClipData(clip, trackNum): cd = ClipData(clip.name.split(" ")[0], clip.media_reference, @@ -262,18 +275,20 @@ def checkMoved(allDel, allAdd): # ones found as edited = moved and edited # wanted to compare full names to account for dif dep/take - # otherwise shotA (layout) and shotA (anim) would count as a move and not as add + # otherwise shotA (layout123) and shotA (anim123) would count as a move and not as add + # TODO: maybe preserve full name and also clip name, ex. 
id and name for c in allDel: c.name = c.source.name for c in allAdd: c.name = c.source.name newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd) + # removes clips that are moved in same track, just keep clips moved between tracks moved = [clip for clip in moved if clip.track_num != clip.pair.track_num] for clip in moved: clip.note = "Moved from track: " + str(clip.pair.track_num) # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num) - + # TODO: check if empty string or not for i in moveEdit: i.note += " and moved from track " + str(i.pair.track_num) # print(i.name, i.note) @@ -345,19 +360,29 @@ def makeNewOtio(clipDB, trackType): newTl.tracks.extend(displayA) - makeOtio.colorMovedA(newTl, clipDB) + # makeOtio.colorMovedA(newTl, clipDB) return newTl +# TODO: rename to create bucket/cat/db/stuff; categorizeClipsByTracks + comment + def processTracks(tracksA, tracksB): + # TODO: add docstring like this for public facing functions, otherwise comment is ok + """Return a copy of the input timelines with only tracks that match + either the list of names given, or the list of track indexes given.""" clipDB = {} - # clipDB structure: {{1:{"add": [], "edit": [], "same": [], "delete": []}} + # READ ME IMPORTANT READ MEEEEEEE clipDB structure: {1:{"add": [], "edit": [], "same": [], "delete": []} # clipDB keys are track numbers, values are dictionaries # per track dictionary keys are clip categories, values are lists of clips of that category + + # TODO? ^change to class perhaps? low priority shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + # TODO: compute min of 2, then go through leftover and assign accordingly + # maybe compare unmatched against empty track? pad shorter one with empty + # Process Matched Tracks # index through all the tracks of the timeline with less tracks for i in range(0, len(shorterTlTracks)): @@ -400,7 +425,7 @@ def processTracks(tracksA, tracksB): clipDB[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} - + # recat added/deleted into moved clipDB = sortMoved(clipDB) # displayA, displayB = makeNewOtio(clipDB, trackType) diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index 867752193..aa73d3064 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -25,7 +25,7 @@ # sys.path.append("src/py-opentimelineio/opentimelineio/console/otiodiff") -from .otiodiff import getDif +from .otiodiff import getDiff def main(): @@ -116,14 +116,22 @@ def main(): for timeline in timelines: copy_media_to_folder(timeline, args.copy_media_to_folder) + # TODO: Update help text and numbering # ===== NEW Phase 5.5: Diff otio files ====== if args.diff: + # TODO: check there's exactly 2 timelines, complain if not + # error if less than 2, if more than 2 + # TODO? stack, concat, diff make mutually exclusive + # print("comparing:", timelines[0].name, timelines[1].name) - # function that serves as wrapper to call actual getDif main + # function that serves as wrapper to call actual getDiff main timelines = [diff_otio(timelines[0], timelines[1])] + # TODO: warning? 
if timeline empty (no output) + # TODO: test for empty timeline inputs + # Phase 6: Remove/Redaction if args.remove_metadata_key: @@ -248,6 +256,8 @@ def parse_arguments(): formatter_class=argparse.RawDescriptionHelpFormatter ) +# TODO: add ex for otiodiff above^ + # Input... parser.add_argument( "--input", @@ -504,7 +514,7 @@ def read_inputs(input_paths): # ======= NEW ======= def diff_otio(tlA, tlB): - return getDif.main(tlA, tlB) + return getDiff.main(tlA, tlB) # =================== diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py index 3638f07c6..d74717800 100644 --- a/tests/test_otiodiff.py +++ b/tests/test_otiodiff.py @@ -3,7 +3,7 @@ from opentimelineio.console.otiodiff.clipData import ClipData import opentimelineio.console.otiodiff.makeOtio as makeOtio -import opentimelineio.console.otiodiff.getDif as getDiff +import opentimelineio.console.otiodiff.getDiff as getDiff from collections import namedtuple From 16d0fff601b80962d86c2522203b35ee59da9e56 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 14 Aug 2025 15:16:54 -0700 Subject: [PATCH 16/30] added input otio error handling/warnings, added video/audio tracks existence into timeline summary and updated variable/function names Signed-off-by: Yingjie Wang --- .../console/otiodiff/getDiff.py | 163 +++++++++--------- .../console/otiodiff/makeOtio.py | 24 ++- 2 files changed, 99 insertions(+), 88 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index 8c1fefc0d..0a7731ff6 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -8,62 +8,56 @@ from .clipData import ClipData from . import makeOtio - -# TODO: rename main?, rename tlA to timelineA #currently only handles video and audio tracks - -# TODO: constant for video and audio (track.kind.video?) 
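# Editor's aside (usage sketch, file names are placeholders): after the rename
# in the hunk below, the module's public entry point is
# diff(timelineA, timelineB), which prints the summary report and returns the
# combined diff timeline:
#
#   import opentimelineio as otio
#   from opentimelineio.console.otiodiff import getDiff
#
#   tlA = otio.adapters.read_from_file("cut_v1.otio")
#   tlB = otio.adapters.read_from_file("cut_v2.otio")
#   result = getDiff.diff(tlA, tlB)
#   otio.adapters.write_to_file(result, "diffed.otio")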
-def main(tlA, tlB): +def diff(timelineA, timelineB): # TODO: put docstring here, descriptive name, most wordy descrip hasVideo = False hasAudio = False # check input timelines for video and audio tracks - if len(tlA.video_tracks()) > 0 or len(tlB.video_tracks()) > 0: + if len(timelineA.video_tracks()) > 0 or len(timelineB.video_tracks()) > 0: hasVideo = True - else: - # TODO: put this in summary report - print("no video tracks") + # else: + # print("no video tracks") - if len(tlA.audio_tracks()) > 0 or len(tlB.audio_tracks()) > 0: + if len(timelineA.audio_tracks()) > 0 or len(timelineB.audio_tracks()) > 0: hasAudio = True - else: - print("no audio tracks") + # else: + # print("no audio tracks") - makeTlSummary(tlA, tlB) + makeTlSummary(timelineA, timelineB) outputTl = None # process video tracks, audio tracks, or both - # TODO: maybe table rather than db if hasVideo and hasAudio: - videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks()) - audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks()) + videoClipTable = processTracks(timelineA.video_tracks(), timelineB.video_tracks()) + audioClipTable = processTracks(timelineA.audio_tracks(), timelineB.audio_tracks()) - makeSummary(videoClipDB, "Video", "perTrack") - makeSummary(audioClipDB, "Audio", "summary") + makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "perTrack") + makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary") - videoTl = makeNewOtio(videoClipDB, "Video") - outputTl = makeNewOtio(audioClipDB, "Audio") + videoTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video) + outputTl = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio) # combine for t in videoTl.tracks: outputTl.tracks.append(copy.deepcopy(t)) elif hasVideo: - videoClipDB = processTracks(tlA.video_tracks(), tlB.video_tracks()) - makeSummary(videoClipDB, "Video", "summary") - outputTl = makeNewOtio(videoClipDB, "Video") + videoClipTable = processTracks(timelineA.video_tracks(), timelineB.video_tracks()) + makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary") + outputTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video) elif hasAudio: - audioClipDB = processTracks(tlA.audio_tracks(), tlB.audio_tracks()) - makeSummary(audioClipDB, "Audio", "summary") - outputTl = makeNewOtio(audioClipDB, "Audio") + audioClipTable = processTracks(timelineA.audio_tracks(), timelineB.audio_tracks()) + makeSummary(audioClipTable, "Audio", "summary") + outputTl = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio) else: # TODO: log no vid/aud or throw pass # Debug - # origClipCount = len(tlA.find_clips()) + len(tlB.find_clips()) + # origClipCount = len(timelineA.find_clips()) + len(timelineB.find_clips()) # print(origClipCount) # print(len(outputTl.find_clips())) @@ -295,16 +289,16 @@ def checkMoved(allDel, allAdd): return newAdd, moveEdit, moved, newDel -# TODO: account for move edit, currently only identifies strictly moved -def sortMoved(clipDB): +# TODO? 
account for move edit, currently only identifies strictly moved +def sortMoved(clipTable): allAdd = [] allEdit = [] allSame = [] allDel = [] - for track in clipDB.keys(): - clipGroup = clipDB[track] - # print(clipDB[track]["add"]) + for track in clipTable.keys(): + clipGroup = clipTable[track] + # print(clipTable[track]["add"]) if "add" in clipGroup.keys(): allAdd.extend(clipGroup["add"]) if "delete" in clipGroup.keys(): @@ -320,47 +314,48 @@ def sortMoved(clipDB): # currently moved clips are still marked as delete in timelineA for cd in moved: - clipDB[cd.track_num]["add"].remove(cd) - clipDB[cd.track_num]["move"].append(cd) - # clipDB[cd.track_num]["delete"].remove(cd) - # clipDB[cd.pair.track_num]["moved"].append(cd.pair) + clipTable[cd.track_num]["add"].remove(cd) + clipTable[cd.track_num]["move"].append(cd) + # clipTable[cd.track_num]["delete"].remove(cd) + # clipTable[cd.pair.track_num]["moved"].append(cd.pair) - return clipDB + return clipTable -def makeNewOtio(clipDB, trackType): +def makeNewOtio(clipTable, trackType): newTl = otio.schema.Timeline(name="diffed") - displayA = [] - displayB = [] + # TODO: rename into track sets + tracksInA = [] + tracksInB = [] # make tracks A and B in output timeline - for trackNum in clipDB.keys(): + for trackNum in clipTable.keys(): # use named tuple here since clip categories won't change anymore - SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete', 'move']) - clipGroup = SortedClipDatas(clipDB[trackNum]["add"], clipDB[trackNum]["edit"], clipDB[trackNum]["same"], clipDB[trackNum]["delete"], clipDB[trackNum]["move"]) + SortedClipDatas = namedtuple('ClipGroup', ['add', 'edit', 'same', 'delete', 'move']) + clipGroup = SortedClipDatas(clipTable[trackNum]["add"], clipTable[trackNum]["edit"], clipTable[trackNum]["same"], clipTable[trackNum]["delete"], clipTable[trackNum]["move"]) - newA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) - displayA.append(newA) + newTrackA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) + tracksInA.append(newTrackA) - newB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) - displayB.append(newB) + newTrackB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) + tracksInB.append(newTrackB) # write order to output timeline so that timeline B is on top for both video and audio - if trackType == "Video": - newTl.tracks.extend(displayA) + if trackType == otio.schema.Track.Kind.Video: + newTl.tracks.extend(tracksInA) newEmpty = makeOtio.makeEmptyTrack(trackType) newTl.tracks.append(newEmpty) - newTl.tracks.extend(displayB) - elif trackType == "Audio": - newTl.tracks.extend(displayB) + newTl.tracks.extend(tracksInB) + elif trackType == otio.schema.Track.Kind.Audio: + newTl.tracks.extend(tracksInB) newEmpty = makeOtio.makeEmptyTrack(trackType) newTl.tracks.append(newEmpty) - newTl.tracks.extend(displayA) + newTl.tracks.extend(tracksInA) - # makeOtio.colorMovedA(newTl, clipDB) + # makeOtio.colorMovedA(newTl, clipTable) return newTl @@ -370,9 +365,9 @@ def processTracks(tracksA, tracksB): # TODO: add docstring like this for public facing functions, otherwise comment is ok """Return a copy of the input timelines with only tracks that match either the list of names given, or the list of track indexes given.""" - clipDB = {} - # READ ME IMPORTANT READ MEEEEEEE clipDB structure: {1:{"add": [], "edit": [], "same": [], "delete": []} - # clipDB keys are track numbers, values are dictionaries + clipTable = {} + # READ ME IMPORTANT READ MEEEEEEE clipTable structure: {1:{"add": [], "edit": [], "same": 
[], "delete": []} + # clipTable keys are track numbers, values are dictionaries # per track dictionary keys are clip categories, values are lists of clips of that category # TODO? ^change to class perhaps? low priority @@ -394,12 +389,12 @@ def processTracks(tracksA, tracksB): add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) # print(add) - clipDB[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} - # print("here", clipDB[trackNum]["add"][0].name) + clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} + # print("here", clipTable[trackNum]["add"][0].name) # Process Unmatched Tracks if shorterTlTracks == tracksA: - # tlA is shorter so tlB has added tracks + # timelineA is shorter so timelineB has added tracks for i in range(len(shorterTlTracks), len(tracksB)): newTrack = tracksB[i] trackNum = i + 1 @@ -410,7 +405,7 @@ def processTracks(tracksA, tracksB): cd = makeClipData(c, trackNum) added.append(cd) - clipDB[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} + clipTable[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} else: for i in range(len(shorterTlTracks), len(tracksA)): @@ -423,15 +418,15 @@ def processTracks(tracksA, tracksB): cd = makeClipData(c, trackNum) deleted.append(cd) - clipDB[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} + clipTable[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} # recat added/deleted into moved - clipDB = sortMoved(clipDB) + clipTable = sortMoved(clipTable) - # displayA, displayB = makeNewOtio(clipDB, trackType) - return clipDB + # tracksInA, tracksInB = makeNewOtio(clipTable, trackType) + return clipTable -def makeSummary(clipDB, trackType, mode): +def makeSummary(clipTable, trackType, mode): print(trackType.upper(), "CLIPS") print("===================================") print(" Overview Summary ") @@ -444,8 +439,8 @@ def makeSummary(clipDB, trackType, mode): allMove = [] if mode == "summary": - for track in clipDB.keys(): - clipGroup = clipDB[track] + for track in clipTable.keys(): + clipGroup = clipTable[track] allAdd.extend(clipGroup["add"]) if "add" in clipGroup.keys() else print("no add") allDel.extend(clipGroup["delete"]) if "delete" in clipGroup.keys() else print("no del") @@ -460,8 +455,8 @@ def makeSummary(clipDB, trackType, mode): if mode == "perTrack": # print by track - for track in clipDB.keys(): - clipGroup = clipDB[track] + for track in clipTable.keys(): + clipGroup = clipTable[track] print("================== Track", track, "==================") for cat in clipGroup.keys(): print("") @@ -471,24 +466,32 @@ def makeSummary(clipDB, trackType, mode): print(i.name) print("") -def makeTlSummary(tlA, tlB): - print("Comparing Timeline B:", tlB.name, "vs") - print(" Timeline A:", tlA.name) +def makeTlSummary(timelineA, timelineB): + print("Comparing Timeline B:", timelineB.name, "vs") + print(" Timeline A:", timelineA.name) print("") + + if len(timelineA.video_tracks()) == 0: + print("No video tracks in A") + if len(timelineB.video_tracks()) == 0: + print("No video tracks in B") + + if len(timelineA.audio_tracks()) == 0: + print("No audio tracks in A") + if len(timelineB.audio_tracks()) == 0: + print("No audio tracks in B") + # compare overall file duration - if(tlB.duration() > tlA.duration()): - delta = tlB.duration().to_seconds() - tlA.duration().to_seconds() + if(timelineB.duration() > timelineA.duration()): + delta = timelineB.duration().to_seconds() - timelineA.duration().to_seconds() print(f"Timeline 
duration increased by {delta:.2f} seconds") - elif(tlB.duration() < tlA.duration()): - delta = tlA.duration().to_seconds() - tlB.duration().to_seconds() + elif(timelineB.duration() < timelineA.duration()): + delta = timelineA.duration().to_seconds() - timelineB.duration().to_seconds() print(f"Timeline duration decreased by {delta:.2f} seconds") else: print("Timeline duration did not change") print("") -if __name__ == "__main__": - main() - ''' ======= Notes ======= maybe can make use of algorithms.filter.filter_composition diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 4d11f638b..06e150d25 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -30,6 +30,8 @@ def sortClips(trackClips): def addRavenColor(clip, color): # print(clip.metadata) + # TODO: if raven not in metadata, add empty dict + if "raven" in clip.metadata: clip.metadata["raven"]["color"] = color.upper() else: @@ -46,20 +48,22 @@ def addMarker(newClip, color, clipData): color = color.upper() newMarker.color = color - if isinstance(clipData, ClipData) and clipData.note is not None: - # print("edit note added") - newMarker.name = clipData.note - if(color == "GREEN"): newMarker.name = "added" elif(color == "PINK"): newMarker.name = "deleted" + if isinstance(clipData, ClipData) and clipData.note is not None: + # print("edit note added") + newMarker.name = clipData.note + + newClip.markers.append(newMarker) return newClip # make new blank track that acts as a separator between the A and B sections +# TODO: make separater track def makeEmptyTrack(trackType): return otio.schema.Track(name="=====================", kind=trackType) @@ -96,6 +100,7 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) newClip = copy.deepcopy(clipData.source) if clipColor is not None: newClip = addRavenColor(newClip, clipColor) + # TODO: move out of if and make clipColor optional with default color if markersOn: newClip = addMarker(newClip, clipColor, clipData) track.append(newClip) @@ -110,9 +115,9 @@ def makeTrackB(clipGroup, trackNum, trackKind): tMovedV = makeTrack("moved", trackKind, clipGroup.move, "PURPLE", markersOn=True) flatB = otio.core.flatten_stack([tSameV, tEditedV, tAddV, tMovedV]) - if trackKind == "Video": + if trackKind == otio.schema.Track.Kind.Video: flatB.name = "Video B" + str(trackNum) - elif trackKind == "Audio": + elif trackKind == otio.schema.Track.Kind.Audio: flatB.name = "Audio B" + str(trackNum) flatB.kind = trackKind @@ -132,10 +137,13 @@ def makeTrackA(clipGroup, trackNum, trackKind): tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") + # TODO: explain the make sep then merge flatten tracks thing flatA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) - if trackKind == "Video": + + # TODO: change video to directly use trackKind + if trackKind == otio.schema.Track.Kind.Video: flatA.name = "Video A" + str(trackNum) - elif trackKind == "Audio": + elif trackKind == otio.schema.Track.Kind.Audio: flatA.name = "Audio A" + str(trackNum) flatA.kind = trackKind From 64ea08ed4d6ed21163084244dfaae07bfb31147f Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 14 Aug 2025 17:38:32 -0700 Subject: [PATCH 17/30] changed ClipData init to pull info from source OTIO clip, updated instance variable names to be more descriptive, refactored test and diff scripts to match Signed-off-by: Yingjie 
Wang --- .../console/otiodiff/clipData.py | 51 +- .../console/otiodiff/getDiff.py | 61 +- .../console/otiodiff/makeOtio.py | 4 +- tests/test_otiodiff.py | 774 ++++++++++++------ 4 files changed, 571 insertions(+), 319 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 2bdc246b0..2f4ab6fbb 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -1,37 +1,44 @@ import opentimelineio as otio -# clip comparable??? ClipInfo -# TODO: add fullname, full name = name + take, name is just name, add ex, split on space, b4 is name, after is version -# TODO: rename take to version? +# TODO: clip comparable??? ClipInfo +# source clip or clip ref? +# full name = name + version, name is just name, add ex, split on space, b4 is name, after is version class ClipData: + full_name = "" name = "" - # currently not used in comparisons - take = None + version = None # currently not used in comparisons media_ref = None source_range = otio.opentime.TimeRange() timeline_range = otio.opentime.TimeRange() + track_num = None # not originally stored in otio.schema.Clip + source_clip = otio.schema.Clip() + # everything below holds comparison result info note = "" - source = otio.schema.Clip() - pair = None - track_num = None - - # TODO: sort so above is compare, bottom is additional compare result info - -# TODO: rename source to sourceClip, rename pair to matchedClipData? -# just pass source clip and track num so construct here - def __init__(self, name, media_ref, source_range, timeline_range, track_num, source, take=None, note=None): - self.name = name - self.media_ref = media_ref - self.source_range = source_range - self.timeline_range = timeline_range + matched_clipData = None + + def __init__(self, source_clip, track_num, note=None): + self.full_name = source_clip.name + self.name = self.splitFullName(source_clip)[0] + self.version = self.splitFullName(source_clip)[1] + self.media_ref = source_clip.media_reference + self.source_range = source_clip.source_range + self.timeline_range = source_clip.trimmed_range_in_parent() self.track_num = track_num - self.source = source - self.take = take + self.source_clip = source_clip self.note = note + + # split full name into name of clip and version by white space + # uses structure of "clipA v1" where clipA is the name and v1 is the version + def splitFullName(self, clip): + shortName = clip.name.split(" ")[0] + version = clip.name.split(" ")[1] if len(clip.name.split(" ")) > 1 else None + + return shortName, version + def printData(self): print("name: ", self.name) - print("take: ", self.take) + print("version: ", self.version) print("media ref: ", self.media_ref) print("source start time: ", self.source_range.start_time.value, " duration: ", self.source_range.duration.value) print("timeline start time:", self.timeline_range.start_time.value, " duration: ", self.timeline_range.duration.value) @@ -110,7 +117,7 @@ def checkEdited(self, cA): deltaFramesStr = str(abs(selfDur.to_frames() - cADur.to_frames())) if(selfDur.value == cADur.value): - self.note = "start time changed" + self.note = "start time in source range changed" # put note assignment into function, return note? 
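# A minimal sketch of the refactor the TODO above suggests (hypothetical helper name,
# not part of this patch): a ClipData method that derives the edit note from the two
# source ranges and returns it, instead of assigning self.note inline. The
# "start time in source range changed" and "trimmed N frames" strings mirror the note
# strings exercised by this patch's tests; the wording of the lengthen case is an assumption.
def _makeEditNote(self, olderClipData):
    selfDur = self.source_range.duration
    olderDur = olderClipData.source_range.duration
    if selfDur.value == olderDur.value:
        # durations match, so only the source start time moved
        return "start time in source range changed"
    deltaFrames = abs(selfDur.to_frames() - olderDur.to_frames())
    if selfDur.to_frames() < olderDur.to_frames():
        return "trimmed " + str(deltaFrames) + " frames"
    return "lengthened " + str(deltaFrames) + " frames"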
# self, other, olderClipData rather than cA diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index 0a7731ff6..cb7e27e3f 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -179,15 +179,15 @@ def compareClips(clipDatasA, clipDatasB): if cB.name not in namesA: added.append(cB) else: - cB.pair = namesA[cB.name] - isSame = cB.checkSame(cB.pair) + cB.matched_clipData = namesA[cB.name] + isSame = cB.checkSame(cB.matched_clipData) if(isSame): # cB.pair = namesA[cB.name] unchanged.append(cB) else: - isEdited = cB.checkEdited(cB.pair) + isEdited = cB.checkEdited(cB.matched_clipData) if(isEdited): - cB.pair = namesA[cB.name] + # cB.matched_clipData = namesA[cB.name] edited.append(cB) else: print("======== not categorized ==========") @@ -205,26 +205,26 @@ def compareClips(clipDatasA, clipDatasB): # TODO: some can be sets instead of lists return added, edited, unchanged, deleted -# clip is an otio Clip -def getTake(clip): - take = None - if(len(clip.name.split(" ")) > 1): - take = clip.name.split(" ")[1] - else: - take = None - return take +# # clip is an otio Clip +# def getTake(clip): +# take = None +# if(len(clip.name.split(" ")) > 1): +# take = clip.name.split(" ")[1] +# else: +# take = None +# return take # TODO: change name, make comparable rep? clip comparator? # TODO: learn abt magic functions ex __eq__ -def makeClipData(clip, trackNum): - cd = ClipData(clip.name.split(" ")[0], - clip.media_reference, - clip.source_range, - clip.trimmed_range_in_parent(), - trackNum, - clip, - getTake(clip)) - return cd +# def makeClipData(clip, trackNum): +# cd = ClipData(clip.name.split(" ")[0], +# clip.media_reference, +# clip.source_range, +# clip.trimmed_range_in_parent(), +# trackNum, +# clip, +# getTake(clip)) +# return cd # the consolidated version of processVideo and processAudio, meant to replace both def compareTracks(trackA, trackB, trackNum): @@ -233,12 +233,12 @@ def compareTracks(trackA, trackB, trackNum): for c in trackA.find_clips(): # put clip info into ClipData - cd = makeClipData(c, trackNum) + cd = ClipData(c, trackNum) clipDatasA.append(cd) for c in trackB.find_clips(): # put clip info into ClipData - cd = makeClipData(c, trackNum) + cd = ClipData(c, trackNum) clipDatasB.append(cd) (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB) @@ -271,20 +271,21 @@ def checkMoved(allDel, allAdd): # wanted to compare full names to account for dif dep/take # otherwise shotA (layout123) and shotA (anim123) would count as a move and not as add # TODO: maybe preserve full name and also clip name, ex. 
id and name + # TODO: fix compareClips so that it allows check by full name for c in allDel: - c.name = c.source.name + c.name = c.full_name for c in allAdd: - c.name = c.source.name + c.name = c.full_name newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd) # removes clips that are moved in same track, just keep clips moved between tracks - moved = [clip for clip in moved if clip.track_num != clip.pair.track_num] + moved = [clip for clip in moved if clip.track_num != clip.matched_clipData.track_num] for clip in moved: - clip.note = "Moved from track: " + str(clip.pair.track_num) + clip.note = "Moved from track: " + str(clip.matched_clipData.track_num) # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num) # TODO: check if empty string or not for i in moveEdit: - i.note += " and moved from track " + str(i.pair.track_num) + i.note += " and moved from track " + str(i.matched_clipData.track_num) # print(i.name, i.note) return newAdd, moveEdit, moved, newDel @@ -402,7 +403,7 @@ def processTracks(tracksA, tracksB): added = [] for c in newTrack.find_clips(): - cd = makeClipData(c, trackNum) + cd = ClipData(c, trackNum) added.append(cd) clipTable[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} @@ -415,7 +416,7 @@ def processTracks(tracksA, tracksB): deleted = [] for c in newTrack.find_clips(): - cd = makeClipData(c, trackNum) + cd = ClipData(c, trackNum) deleted.append(cd) clipTable[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 06e150d25..6631e9bfb 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -97,7 +97,7 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) currentEnd += tlDuration # add clip to track - newClip = copy.deepcopy(clipData.source) + newClip = copy.deepcopy(clipData.source_clip) if clipColor is not None: newClip = addRavenColor(newClip, clipColor) # TODO: move out of if and make clipColor optional with default color @@ -132,7 +132,7 @@ def makeTrackA(clipGroup, trackNum, trackKind): prevEdited = [] prevMoved = [] for e in clipGroup.edit: - prevEdited.append(e.pair) + prevEdited.append(e.matched_clipData) tEditedV = makeTrack("edited", trackKind, prevEdited, "ORANGE") tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py index d74717800..d70f9467b 100644 --- a/tests/test_otiodiff.py +++ b/tests/test_otiodiff.py @@ -10,201 +10,398 @@ class TestClipData(unittest.TestCase): # check if the names of two ClipDatas are the same + def test_same_name(self): - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - assert(clipA.sameName(clipB)) + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + 
media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert clipDataB.sameName(clipDataA) def test_different_name(self): - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName2", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - assert(not clipA.sameName(clipB)) + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName2 testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert not clipDataB.sameName(clipDataA) def test_same_duration(self): # check that length of clip is the same - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName2", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - assert(clipA.sameDuration(clipB)) + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + assert clipDataB.sameDuration(clipDataA) + def test_different_duration(self): # check that length of clip is the different - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(duration=otio.opentime.RationalTime(100,1)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName2", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - assert(not clipA.sameDuration(clipB)) + clipA = otio.schema.Clip( + 
name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert not clipDataB.sameDuration(clipDataA) def test_check_same(self): # check that two exact same clips are the same # check that two exact same clips but moved in the timeline are the same - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - - assert clipA.checkSame(clipB) + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() - def test_check_not_same(self): - # check that two exact same clips are the same + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert clipDataB.checkSame(clipDataA) + + def test_check_same_if_move(self): # check that two exact same clips but moved in the timeline are the same - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName2", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + gapDur = otio.opentime.RationalTime(5, 24) + gap = 
otio.schema.Gap(duration = gapDur) + + trackA.append(clipA) + trackB.extend([gap, clipB]) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert clipDataB.checkSame(clipDataA) + assert clipDataB.note == "moved" - assert not clipA.checkSame(clipB) - assert clipA.note is None + def test_check_not_same(self): + # check that two clips with different names are not the same + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName2 testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert not clipDataB.checkSame(clipDataA) + assert clipDataB.note is None def test_check_not_same2(self): - # check that two exact same clips are the same - # check that two exact same clips but moved in the timeline are the same - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(duration=otio.opentime.RationalTime(100,1)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") + # check that two clips with different source range start durations are not the same + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) - assert not clipA.checkSame(clipB) - assert clipA.note is None + assert not clipDataB.checkSame(clipDataA) + assert clipDataB.note is None + + def test_check_not_same3(self): + # check that two clips with different source range start times are not the same + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + 
otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() - def test_check_not_same3_moved(self): - # check that two exact same clips are the same - # check that two exact same clips but moved in the timeline are the same - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(100,1)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) - assert clipA.checkSame(clipB) - assert clipA.note == "moved" + assert not clipDataB.checkSame(clipDataA) + assert clipDataB.note is None def test_check_Edited(self): # check for trim head/tail and lengthen head/tail - clipA = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(0,1)), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") - clipB = ClipData("testName", - "testMR.mov", - otio.opentime.TimeRange(start_time=otio.opentime.RationalTime(100,1)), - otio.opentime.TimeRange(), - otio.schema.Clip(), - "testTake") + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(90, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(90, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) - assert clipA.checkEdited(clipB) - assert clipA.note == "start time changed" + assert clipDataB.checkEdited(clipDataA) + assert clipDataB.note == "trimmed 10 frames" class TestGetDif(unittest.TestCase): def test_find_clones(self): - clipA = ClipData("clipA", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("clipB", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipC = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipCClone = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipD = ClipData("clipD", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(40, 24), 
otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") + clipA = otio.schema.Clip( + name = "clipA testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipB = otio.schema.Clip( + name = "clipB testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipC = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipCClone = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipD = otio.schema.Clip( + name = "clipD testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), + ) + trackA = otio.schema.Track() + + trackA.extend([clipA, clipB, clipC, clipCClone, clipD]) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + clipDataC = ClipData(clipC, 1) + clipDataCClone = ClipData(clipCClone, 1) + clipDataD = ClipData(clipD, 1) - testClips = [clipA, clipB, clipC, clipCClone, clipD] + testClips = [clipDataA, clipDataB, clipDataC, clipDataCClone, clipDataD] clones, nonClones = getDiff.findClones(testClips) - correctClones = {clipC.name: [clipC, clipCClone]} - correctNonClones = [clipA, clipB, clipD] + correctClones = {clipDataC.name: [clipDataC, clipDataCClone]} + correctNonClones = [clipDataA, clipDataB, clipDataD] assert(clones == correctClones), "Not all cloned clips correctly identified" assert(nonClones == correctNonClones), "Not all unique clips correctly identified" @@ -212,38 +409,53 @@ def test_find_clones(self): def test_sort_clones_clones_in_both(self): # SETUP - clipA = ClipData("clipA", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("clipB", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipC = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipCClone = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipD = ClipData("clipD", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipDatasA = [clipA, clipB, clipC, clipCClone] - clipDatasB = [clipB, clipC, clipCClone, clipD] + clipA = otio.schema.Clip( + name = "clipA testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipB = otio.schema.Clip( + name = "clipB testTake", + 
media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipC = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipCClone = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipD = otio.schema.Clip( + name = "clipD testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), + ) + trackA = otio.schema.Track() + + trackA.extend([clipA, clipB, clipC, clipCClone, clipD]) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + clipDataC = ClipData(clipC, 1) + clipDataCClone = ClipData(clipCClone, 1) + clipDataD = ClipData(clipD, 1) + + clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone] + clipDatasB = [clipDataB, clipDataC, clipDataCClone, clipDataD] # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) @@ -258,38 +470,54 @@ def test_sort_clones_clones_in_both(self): assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" def test_sort_clones_clones_in_one(self): - clipA = ClipData("clipA", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("clipB", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipC = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipCClone = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipD = ClipData("clipD", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipDatasA = [clipA, clipB, clipC, clipCClone] - clipDatasB = [clipA, clipB, clipD] + # SETUP + clipA = otio.schema.Clip( + name = "clipA testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipB = otio.schema.Clip( + name = "clipB testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipC = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipCClone = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + 
otio.opentime.RationalTime(10, 24))), + ) + clipD = otio.schema.Clip( + name = "clipD testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), + ) + trackA = otio.schema.Track() + + trackA.extend([clipA, clipB, clipC, clipCClone, clipD]) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + clipDataC = ClipData(clipC, 1) + clipDataCClone = ClipData(clipCClone, 1) + clipDataD = ClipData(clipD, 1) + + clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone] + clipDatasB = [clipDataA, clipDataB, clipDataD] # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) @@ -304,38 +532,54 @@ def test_sort_clones_clones_in_one(self): assert(len(nonClonesB) == 3), "Number of non-clones found in trackB doesn't match" def test_sort_clones_clones_in_one_single_in_other(self): - clipA = ClipData("clipA", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipB = ClipData("clipB", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipC = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(20, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipCClone = ClipData("clipC", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(30, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipD = ClipData("clipD", - "testMR.mov", - otio.opentime.TimeRange(), - otio.opentime.TimeRange(otio.opentime.RationalTime(0, 24), otio.opentime.RationalTime(10, 24)), - otio.schema.Clip(), - "testTake") - clipDatasA = [clipA, clipB, clipC, clipCClone] - clipDatasB = [clipB, clipC, clipD] + # SETUP + clipA = otio.schema.Clip( + name = "clipA testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipB = otio.schema.Clip( + name = "clipB testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipC = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipCClone = otio.schema.Clip( + name = "clipC testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), + ) + clipD = otio.schema.Clip( + name = "clipD testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), + ) + trackA = otio.schema.Track() + + trackA.extend([clipA, clipB, clipC, clipCClone, clipD]) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + clipDataC = ClipData(clipC, 1) + clipDataCClone = ClipData(clipCClone, 1) + clipDataD = ClipData(clipD, 1) + + clipDatasA = [clipDataA, clipDataB, clipDataC, clipDataCClone] + 
clipDatasB = [clipDataB, clipDataC, clipDataD] # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) @@ -349,13 +593,13 @@ def test_sort_clones_clones_in_one_single_in_other(self): assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" -class TestMakeOtio(unittest.TestCase): - # Test the type parameter to makeTimelineOfType, but not the detailed results. - def test_make_timeline_type(self): - # SETUP - trackA = otio.schema.Track() - trackB = otio.schema.Track() - pass +# class TestMakeOtio(unittest.TestCase): +# # Test the type parameter to makeTimelineOfType, but not the detailed results. +# def test_make_timeline_type(self): +# # SETUP +# trackA = otio.schema.Track() +# trackB = otio.schema.Track() +# pass # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) # videoGroup = SortedClipDatas([], [], [], []) From fb26df8feefa12a883cdbe9db8d930f86bde7e20 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Thu, 14 Aug 2025 17:46:57 -0700 Subject: [PATCH 18/30] added otiodiff description and example to otiotool description, fixed typo in inspect example, and updated otiodiff function call from main to diff Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiotool.py | 31 ++++++++++++------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index aa73d3064..9d4444bb6 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -116,21 +116,22 @@ def main(): for timeline in timelines: copy_media_to_folder(timeline, args.copy_media_to_folder) - # TODO: Update help text and numbering + # TODO: Update numbering # ===== NEW Phase 5.5: Diff otio files ====== if args.diff: - # TODO: check there's exactly 2 timelines, complain if not - # error if less than 2, if more than 2 # TODO? stack, concat, diff make mutually exclusive # print("comparing:", timelines[0].name, timelines[1].name) + assert len(timelines) >= 2, "Less than 2 timelines given. 2 timelines are required to perform a diff" - # function that serves as wrapper to call actual getDiff main - timelines = [diff_otio(timelines[0], timelines[1])] - - # TODO: warning? if timeline empty (no output) - # TODO: test for empty timeline inputs + # TODO: test for empty timeline inputs, currently checks for existence of timeline but not of tracks in timeline + if len(timelines) == 2: + timelines = [diff_otio(timelines[0], timelines[1])] + if len(timelines) == 0: + print("no output timeline generated by diff") + else: + print("Warning: more than 2 timelines provided as input. Only the first two timelines will be diffed.") # Phase 6: Remove/Redaction @@ -227,6 +228,11 @@ def parse_arguments(): If specified, the --redact option, will remove ALL metadata and rename all objects in the OTIO with generic names (e.g. "Track 1", "Clip 17", etc.) +5.5 Diff + The --diff option allows you to compare two OTIO files. It generates an + OTIO file annotated with the differences as well as a in console text + summary report. --diff can't be used concurrently with --stack or --concat + 6. 
Inspect Options such as --stats, --list-clips, --list-tracks, --list-media, --verify-media, --list-markers, --verify-ranges, and --inspect @@ -251,13 +257,14 @@ def parse_arguments(): otiotool -i playlist.otio --verify-media Inspect specific audio clips in detail: -otiotool -i playlist.otio --only-audio --list-tracks --inspect "Interview" +otiotool -i playlist.otio --audio-only --list-tracks --inspect "Interview" + +Diff fileB against fileA (ordering matters where fileA is the file fileB compares against): +otiotool -i fileA.otio fileB.otio --diff --o display.otio """, formatter_class=argparse.RawDescriptionHelpFormatter ) -# TODO: add ex for otiodiff above^ - # Input... parser.add_argument( "--input", @@ -514,7 +521,7 @@ def read_inputs(input_paths): # ======= NEW ======= def diff_otio(tlA, tlB): - return getDiff.main(tlA, tlB) + return getDiff.diff(tlA, tlB) # =================== From b40e4177ad289c89a665b17c1fb5fff89da27902 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Fri, 15 Aug 2025 13:15:50 -0700 Subject: [PATCH 19/30] added docstrings, comments, and updated variable names Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 2 +- .../console/otiodiff/getDiff.py | 199 +++++++++--------- .../opentimelineio/console/otiotool.py | 35 +-- 3 files changed, 116 insertions(+), 120 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 2f4ab6fbb..1033f5e2a 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -65,7 +65,7 @@ def checkSame(self, cA): # check names are same if self.sameName(cA): # check source range is same - # TODO: call trimmed range instead of source range + # TODO: call trimmed range instead of source range ??? # TODO: make test where has null source range -> see things break, then go back and change <- low priority if(self.source_range == cA.source_range): # print(self.name, " ", self.timeline_range, " ", cA.timeline_range) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index cb7e27e3f..28ed2bd6c 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -8,9 +8,18 @@ from .clipData import ClipData from . import makeOtio -#currently only handles video and audio tracks -def diff(timelineA, timelineB): - # TODO: put docstring here, descriptive name, most wordy descrip +def diffTimelines(timelineA, timelineB): + '''Diff two OTIO timelines and identify how clips on video and/or audio tracks changed from timeline A to timeline B. + Return an annotated otio file with the differences and print a text summary to console. + + Parameters: + timelineA (otio.schema.Timeline()): timeline from the file you want to compare against, ex. clip1 version 1 + timelineB (otio.schema.Timeline()): timeline from the file you want to compare, ex. 
clip1 version 2
+
+    Returns:
+        outputTimeline (otio.schema.Timeline()): timeline with color coded clips and marker annotations showing the
+        differences between the input tracks with the tracks from timeline B stacked on top of timeline A
+    '''
     hasVideo = False
     hasAudio = False
@@ -25,61 +34,58 @@
     # else:
     #     print("no audio tracks")
 
-    makeTlSummary(timelineA, timelineB)
+    makeTimelineSummary(timelineA, timelineB)
 
-    outputTl = None
+    outputTimeline = None
     # process video tracks, audio tracks, or both
     if hasVideo and hasAudio:
-        videoClipTable = processTracks(timelineA.video_tracks(), timelineB.video_tracks())
-        audioClipTable = processTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
+        videoClipTable = categorizeClipsByTracks(timelineA.video_tracks(), timelineB.video_tracks())
+        audioClipTable = categorizeClipsByTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
 
         makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "perTrack")
         makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary")
 
         videoTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
-        outputTl = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
+        outputTimeline = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
 
         # combine
         for t in videoTl.tracks:
-            outputTl.tracks.append(copy.deepcopy(t))
+            outputTimeline.tracks.append(copy.deepcopy(t))
 
     elif hasVideo:
-        videoClipTable = processTracks(timelineA.video_tracks(), timelineB.video_tracks())
+        videoClipTable = categorizeClipsByTracks(timelineA.video_tracks(), timelineB.video_tracks())
         makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary")
-        outputTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
+        outputTimeline = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
 
     elif hasAudio:
-        audioClipTable = processTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
+        audioClipTable = categorizeClipsByTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
         makeSummary(audioClipTable, "Audio", "summary")
-        outputTl = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
+        outputTimeline = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)
 
     else:
-        # TODO: log no vid/aud or throw
-        pass
+        print("No video or audio tracks found in either timeline.")
 
     # Debug
     # origClipCount = len(timelineA.find_clips()) + len(timelineB.find_clips())
     # print(origClipCount)
-    # print(len(outputTl.find_clips()))
+    # print(len(outputTimeline.find_clips()))
 
-    return outputTl
+    return outputTimeline
 
-def toOtio(data, path):
-    otio.adapters.write_to_file(data, path)
-
-# for debugging, put response into file
-def toJson(file):
-    with open("clipDebug.json", "w") as f:
-        f.write(file)
+# TODO: make nonClones a set rather than a list
+def findClones(clips):
+    """Separate the cloned ClipDatas (ones that share the same name) from the unique ClipDatas and return both
+
+    Parameters:
+        clips (list of ClipDatas): list of ClipDatas
 
-def toTxt(file):
-    with open("report.txt", "w") as f:
-        f.write(file)
+    Returns:
+        clones (dictionary): dictionary of all clones in the group of ClipDatas
+            keys: name of clone
+            values: list of ClipDatas of that name
+        nonClones (list): list of unique ClipDatas in the group
+    """
 
-# create a dictionary with all the cloned clips (ones that share the same truncated name)
-# key is the truncated name, value is a list of ClipDatas
-# @parameter clips, list of ClipDatas
-def findClones(clips):
     clones = {}
     nonClones = []
     names = []
@@ -98,6 +104,8 @@ def findClones(clips):
     return clones, nonClones
 
 def sortClones(clipDatasA, clipDatasB):
+    """Identify cloned ClipDatas (ones that share the same name) across two groups of ClipDatas and separate from the unique
+    ClipDatas (ones that only appear once in each group)."""
    # find cloned clips and separate out from unique clips
     clonesA, nonClonesA = findClones(clipDatasA)
     clonesB, nonClonesB = findClones(clipDatasB)
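# Illustrative use of findClones (hypothetical snippet, not part of this patch;
# assumes the in-progress module is importable as shown). Two clips share the
# short name "shotA", so both are grouped as clones; "shotB" stays unique.
import opentimelineio as otio
from opentimelineio.console.otiodiff.clipData import ClipData
from opentimelineio.console.otiodiff import getDiff

track = otio.schema.Track()
clips = [
    otio.schema.Clip(
        name=name,
        source_range=otio.opentime.TimeRange(
            otio.opentime.RationalTime(0, 24),
            otio.opentime.RationalTime(10, 24)))
    for name in ("shotA v1", "shotA v2", "shotB v1")
]
track.extend(clips)  # ClipData needs a parent track for trimmed_range_in_parent()

clipDatas = [ClipData(clip, 1) for clip in clips]
clones, nonClones = getDiff.findClones(clipDatas)

assert list(clones.keys()) == ["shotA"]            # both takes grouped by short name
assert [cd.name for cd in nonClones] == ["shotB"]  # unique clip left alone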
@@ -120,8 +128,8 @@ def sortClones(clipDatasA, clipDatasB):
     # clipCountB = 0
     return (clonesA, nonClonesA), (clonesB, nonClonesB)
 
-# compare all clips that had a clone
 def compareClones(clonesA, clonesB):
+    """Compare two groups of cloned ClipDatas and categorize into added, unchanged, or deleted"""
     added = []
     unchanged = []
     deleted = []
@@ -160,8 +168,8 @@ def compareClones(clonesA, clonesB):
 
     return added, unchanged, deleted
 
-# compare all strictly unique clips
 def compareClips(clipDatasA, clipDatasB):
+    """Compare two groups of unique ClipDatas and categorize into added, edited, unchanged, and deleted"""
     namesA = {}
     namesB = {}
 
@@ -176,6 +184,7 @@ def compareClips(clipDatasA, clipDatasB):
         namesB[c.name] = c
 
     for cB in clipDatasB:
+
         if cB.name not in namesA:
             added.append(cB)
         else:
@@ -205,29 +214,8 @@ def compareClips(clipDatasA, clipDatasB):
     # TODO: some can be sets instead of lists
     return added, edited, unchanged, deleted
 
-# # clip is an otio Clip
-# def getTake(clip):
-#     take = None
-#     if(len(clip.name.split(" ")) > 1):
-#         take = clip.name.split(" ")[1]
-#     else:
-#         take = None
-#     return take
-
-# TODO: change name, make comparable rep? clip comparator?
-# TODO: learn abt magic functions ex __eq__
-# def makeClipData(clip, trackNum):
-#     cd = ClipData(clip.name.split(" ")[0],
-#                   clip.media_reference,
-#                   clip.source_range,
-#                   clip.trimmed_range_in_parent(),
-#                   trackNum,
-#                   clip,
-#                   getTake(clip))
-#     return cd
-
-# the consolidated version of processVideo and processAudio, meant to replace both
 def compareTracks(trackA, trackB, trackNum):
+    """Compare clips in two OTIO tracks and categorize into added, edited, unchanged, and deleted"""
     clipDatasA = []
     clipDatasB = []
 
@@ -244,27 +232,25 @@ def compareTracks(trackA, trackB, trackNum):
     (clonesA, nonClonesA), (clonesB, nonClonesB) = sortClones(clipDatasA, clipDatasB)
 
     # compare clips and put into categories
-    addV = []
-    editV = []
-    sameV = []
-    deleteV = []
+    added = []
+    edited = []
+    unchanged = []
+    deleted = []
 
     # compare and categorize unique clips
-    addV, editV, sameV, deleteV = compareClips(nonClonesA, nonClonesB)
+    added, edited, unchanged, deleted = compareClips(nonClonesA, nonClonesB)
 
     # compare and categorize cloned clips
-    addCloneV, sameCloneV, deleteCloneV = compareClones(clonesA, clonesB)
-    addV.extend(addCloneV)
-    sameV.extend(sameCloneV)
-    deleteV.extend(deleteCloneV)
+    addedClone, unchangedClone, deletedClone = compareClones(clonesA, clonesB)
+    added.extend(addedClone)
+    unchanged.extend(unchangedClone)
+    deleted.extend(deletedClone)
 
-    # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete'])
-    # videoGroup = SortedClipDatas(addV, editV, sameV, deleteV)
-
-    return addV, editV, sameV, deleteV
-    # return videoGroup
+    return added, edited, unchanged, deleted
 
+# TODO? account for move edit, currently only identifies strictly moved
 def checkMoved(allDel, allAdd):
+    """Identify ClipDatas that have moved between different tracks"""
     # ones found as same = moved
 
     # ones found as edited = moved and edited
@@ -290,8 +276,8 @@ def checkMoved(allDel, allAdd):
 
     return newAdd, moveEdit, moved, newDel
 
-# TODO? 
account for move edit, currently only identifies strictly moved def sortMoved(clipTable): + """Put ClipDatas that have moved between tracks into their own category and remove from their previous category""" allAdd = [] allEdit = [] allSame = [] @@ -323,8 +309,17 @@ def sortMoved(clipTable): return clipTable def makeNewOtio(clipTable, trackType): + """Make a new annotated OTIO timeline showing the change from timeline A to timeline B, with the tracks + from timeline B stacked on top of the tracks from timeline A + + Ex. New timeline showing the differences of timeline A and B with 2 tracks each + Track 2B + Track 1B + ======== + Track 2A + Track 1A + """ newTl = otio.schema.Timeline(name="diffed") - # TODO: rename into track sets tracksInA = [] tracksInB = [] @@ -362,15 +357,29 @@ def makeNewOtio(clipTable, trackType): # TODO: rename to create bucket/cat/db/stuff; categorizeClipsByTracks + comment -def processTracks(tracksA, tracksB): - # TODO: add docstring like this for public facing functions, otherwise comment is ok - """Return a copy of the input timelines with only tracks that match - either the list of names given, or the list of track indexes given.""" - clipTable = {} - # READ ME IMPORTANT READ MEEEEEEE clipTable structure: {1:{"add": [], "edit": [], "same": [], "delete": []} - # clipTable keys are track numbers, values are dictionaries - # per track dictionary keys are clip categories, values are lists of clips of that category +def categorizeClipsByTracks(tracksA, tracksB): + """Compare the clips in each track in tracksB against the corresponding track in tracksA + and categorize based on how they have changed. Return a dictionary table of ClipDatas + categorized by added, edited, unchanged, deleted, and moved and ordered by track. + + Parameters: + tracksA (list of otio.schema.Track() elements): list of tracks from timeline A + tracksB (list of otio.schema.Track() elements): list of tracks from timeline B + + Returns: + clipTable (dictionary): dictionary holding categorized ClipDatas, organized by the track number of the ClipDatas + dictionary keys: track number (int) + dictionary values: dictionary holding categorized ClipDatas of that track + nested dictionary keys: category name (string) + nested dictionary values: list of ClipDatas that fall into the category + + ex: clipTable when tracksA and tracksB contain 3 tracks + {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} + 2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} + 3 : {"add": [], "edit": [], "same": [], "delete": []}, "move": []} + """ + clipTable = {} # TODO? ^change to class perhaps? 
low priority shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB @@ -428,6 +437,8 @@ def processTracks(tracksA, tracksB): return clipTable def makeSummary(clipTable, trackType, mode): + """Summarize what clips got changed and how they changed and print to console.""" + print(trackType.upper(), "CLIPS") print("===================================") print(" Overview Summary ") @@ -464,10 +475,11 @@ def makeSummary(clipTable, trackType, mode): print(cat.upper(), ":", len(clipGroup[cat])) if cat != "same": for i in clipGroup[cat]: - print(i.name) + print(i.name + ": " + i.note) if i.note is not None else print(i.name) print("") -def makeTlSummary(timelineA, timelineB): +def makeTimelineSummary(timelineA, timelineB): + """Summarize information about the two timelines compared and print to console.""" print("Comparing Timeline B:", timelineB.name, "vs") print(" Timeline A:", timelineA.name) print("") @@ -494,31 +506,12 @@ def makeTlSummary(timelineA, timelineB): print("") ''' ======= Notes ======= - maybe can make use of algorithms.filter.filter_composition - -# a test using python difflib, prob not useful - # # find deltas of 2 files and print into html site - # d = HtmlDiff(wrapcolumn=100) - # diff = d.make_file(file1.splitlines(), file2.splitlines(), context=True) - # with open("diff.html", "w", encoding="utf-8") as f: - # f.write(diff) - - # s = SequenceMatcher(None, file1, file2) - # print(s.quick_ratio()) - - # each one in new check with each one in old - # if everything matches, unchanged <- can't just check with first instance because might have added one before it - # if everything matches except for timeline position-> moved - # if length doesn't match, look for ordering? or just classify as added/deleted - # if counts of old and new dif then def add/deleted - - Test shot simple: - python ./src/getDif.py /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio + /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio Test seq matching edit's skywalker: - python ./src/getDif.py /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_ChangeNotes.Relinked.01.otio /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio + /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_ChangeNotes.Relinked.01.otio /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio Test shot multitrack: - python ./src/getDif.py /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio + /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio ''' \ No newline at end of file diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index 9d4444bb6..c15901d81 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -121,17 +121,7 @@ def main(): if args.diff: # TODO? stack, concat, diff make mutually exclusive - - # print("comparing:", timelines[0].name, timelines[1].name) - assert len(timelines) >= 2, "Less than 2 timelines given. 
2 timelines are required to perform a diff" - - # TODO: test for empty timeline inputs, currently checks for existence of timeline but not of tracks in timeline - if len(timelines) == 2: - timelines = [diff_otio(timelines[0], timelines[1])] - if len(timelines) == 0: - print("no output timeline generated by diff") - else: - print("Warning: more than 2 timelines provided as input. Only the first two timelines will be diffed.") + diff_otio(timelines) # Phase 6: Remove/Redaction @@ -230,8 +220,10 @@ def parse_arguments(): 5.5 Diff The --diff option allows you to compare two OTIO files. It generates an - OTIO file annotated with the differences as well as a in console text - summary report. --diff can't be used concurrently with --stack or --concat + OTIO file annotated with the differences between their clips as well as a + text summary report in the console. Ordering of files given to --input matters + as diff compares the second file to the first. + --diff can't be used concurrently with --stack or --concat 6. Inspect Options such as --stats, --list-clips, --list-tracks, --list-media, @@ -476,7 +468,7 @@ def parse_arguments(): parser.add_argument( "--diff", action="store_true", - help="""Diff and compare two otio files. Input file type must be .otio""" + help="""Diff and compare two otio files. Input file type must be .otio and input file order matters""" ) # ================== @@ -520,8 +512,19 @@ def read_inputs(input_paths): # ======= NEW ======= -def diff_otio(tlA, tlB): - return getDiff.diff(tlA, tlB) +def diff_otio(timelines): + # TODO: check file format of timelines for OTIO + """Return an annotated timeline showing how clips changed from the first to the second timeline""" + assert len(timelines) >= 2, "Less than 2 timelines given. 2 timelines are required to perform a diff" + + if len(timelines) != 2: + print("Warning: more than 2 timelines provided as input. Only the first two timelines will be diffed.") + else: + timelines = [getDiff.diffTimelines(timelines[0], timelines[1])] + if len(timelines) == 0: + print("No output timeline generated by diff") + + return timelines # =================== From f86d739d268737f37e0e1ce0f0834e44a3f1f425 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Fri, 15 Aug 2025 14:01:49 -0700 Subject: [PATCH 20/30] fixed timeline output bug and return empty timeline instead of None Signed-off-by: Yingjie Wang --- .../opentimelineio/console/otiodiff/getDiff.py | 2 +- .../opentimelineio/console/otiotool.py | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index 28ed2bd6c..b5063c573 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -36,7 +36,7 @@ def diffTimelines(timelineA, timelineB): makeTimelineSummary(timelineA, timelineB) - outputTimeline = None + outputTimeline = otio.schema.Timeline() # process video tracks, audio tracks, or both if hasVideo and hasAudio: videoClipTable = categorizeClipsByTracks(timelineA.video_tracks(), timelineB.video_tracks()) diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index c15901d81..cf778a718 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -121,7 +121,7 @@ def main(): if args.diff: # TODO? 
stack, concat, diff make mutually exclusive - diff_otio(timelines) + timelines = [diff_otio(timelines)] # Phase 6: Remove/Redaction @@ -510,8 +510,6 @@ def read_inputs(input_paths): timelines.append(timeline) return timelines -# ======= NEW ======= - def diff_otio(timelines): # TODO: check file format of timelines for OTIO """Return an annotated timeline showing how clips changed from the first to the second timeline""" @@ -520,13 +518,7 @@ def diff_otio(timelines): if len(timelines) != 2: print("Warning: more than 2 timelines provided as input. Only the first two timelines will be diffed.") else: - timelines = [getDiff.diffTimelines(timelines[0], timelines[1])] - if len(timelines) == 0: - print("No output timeline generated by diff") - - return timelines - -# =================== + return getDiff.diffTimelines(timelines[0], timelines[1]) def keep_only_video_tracks(timeline): """Remove all tracks except for video tracks from a timeline.""" From 2debbb69479a14a54ce8d031aaf8ca8984948089 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Fri, 15 Aug 2025 17:30:15 -0700 Subject: [PATCH 21/30] switched color in makeOtio to use OTIO Color rather than strings, added docstrings and updated variable/function naming Signed-off-by: Yingjie Wang --- .../console/otiodiff/getDiff.py | 4 +- .../console/otiodiff/makeOtio.py | 316 ++---------------- 2 files changed, 38 insertions(+), 282 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index b5063c573..80ab8b895 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -339,14 +339,14 @@ def makeNewOtio(clipTable, trackType): if trackType == otio.schema.Track.Kind.Video: newTl.tracks.extend(tracksInA) - newEmpty = makeOtio.makeEmptyTrack(trackType) + newEmpty = makeOtio.makeSeparaterTrack(trackType) newTl.tracks.append(newEmpty) newTl.tracks.extend(tracksInB) elif trackType == otio.schema.Track.Kind.Audio: newTl.tracks.extend(tracksInB) - newEmpty = makeOtio.makeEmptyTrack(trackType) + newEmpty = makeOtio.makeSeparaterTrack(trackType) newTl.tracks.append(newEmpty) newTl.tracks.extend(tracksInA) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 6631e9bfb..12b9cef47 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -2,72 +2,56 @@ import copy from .clipData import ClipData -# for debugging, put response into file -def toJson(file): - with open("clipDebug.json", "w") as f: - f.write(file) - -def toTimeline(tracks, timeline=None): - tl = timeline - - if tl is None: - tl = otio.schema.Timeline(name="timeline") - - for t in tracks: - tl.tracks.append(t) - - return tl - -def toOtio(file): - otio.adapters.write_to_file(file, "display.otio") - -# input is list of clipDatas, sorts them by start time on the timeline def sortClips(trackClips): + """Sort ClipDatas based on start time on the timeline""" # sort by clip start time in timeline return sorted(trackClips, key=lambda clipData: clipData.timeline_range.start_time.value) -# @params: clip: otio clip def addRavenColor(clip, color): - # print(clip.metadata) + """Add color of clip to metadata of raven so clips are correctly color-coded in raven viewer. 
+ Specific to raven only.""" + + # parses name of color from otio.core.Color and puts into format that raven can read + color = color.name.upper() # TODO: if raven not in metadata, add empty dict if "raven" in clip.metadata: - clip.metadata["raven"]["color"] = color.upper() + clip.metadata["raven"]["color"] = color else: - colorData = {"color" : color.upper()} + colorData = {"color" : color} clip.metadata["raven"] = colorData - # debug - # toJson(otio.adapters.write_to_string(clip.metadata)) return clip def addMarker(newClip, color, clipData): + """Add marker of specified color and name to clip""" newMarker = otio.schema.Marker() newMarker.marked_range = clipData.source_range - color = color.upper() - newMarker.color = color + + # parses name of color from otio.core.Color and puts into format that markers can read + colorName = color.name.upper() + newMarker.color = colorName - if(color == "GREEN"): + if(colorName == "GREEN"): newMarker.name = "added" - elif(color == "PINK"): + elif(colorName == "PINK"): newMarker.name = "deleted" if isinstance(clipData, ClipData) and clipData.note is not None: # print("edit note added") newMarker.name = clipData.note - newClip.markers.append(newMarker) return newClip -# make new blank track that acts as a separator between the A and B sections -# TODO: make separater track -def makeEmptyTrack(trackType): +def makeSeparaterTrack(trackType): + """Make empty track that separates the timeline A tracks from the timeline B tracks""" return otio.schema.Track(name="=====================", kind=trackType) def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): + """Make OTIO track from ClipDatas with option to add markers and color to all clips on track""" # make new blank track with name of kind # print("make track of kind: ", trackKind) track = otio.schema.Track(name=trackName, kind=trackKind) @@ -99,7 +83,10 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) # add clip to track newClip = copy.deepcopy(clipData.source_clip) if clipColor is not None: + #testing newClip = addRavenColor(newClip, clipColor) + newClip.color = clipColor + # TODO: move out of if and make clipColor optional with default color if markersOn: newClip = addMarker(newClip, clipColor, clipData) @@ -107,14 +94,15 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) return track -# make tracks from timeline B def makeTrackB(clipGroup, trackNum, trackKind): - tAddV = makeTrack("added", trackKind, clipGroup.add, "GREEN") - tEditedV = makeTrack("edited", trackKind, clipGroup.edit, "ORANGE", markersOn=True) - tSameV = makeTrack("same", trackKind, clipGroup.same) - tMovedV = makeTrack("moved", trackKind, clipGroup.move, "PURPLE", markersOn=True) - - flatB = otio.core.flatten_stack([tSameV, tEditedV, tAddV, tMovedV]) + """Make an annotated track from timeline B. 
Shows added and edited clips as well as + clips that have moved between tracks.""" + tAdd = makeTrack("added", trackKind, clipGroup.add, otio.core.Color.GREEN) + tEdited = makeTrack("edited", trackKind, clipGroup.edit, otio.core.Color.ORANGE, markersOn=True) + tSame = makeTrack("same", trackKind, clipGroup.same) + tMoved = makeTrack("moved", trackKind, clipGroup.move, otio.core.Color.PURPLE, markersOn=True) + + flatB = otio.core.flatten_stack([tSame, tEdited, tAdd, tMoved]) if trackKind == otio.schema.Track.Kind.Video: flatB.name = "Video B" + str(trackNum) elif trackKind == otio.schema.Track.Kind.Audio: @@ -124,21 +112,22 @@ def makeTrackB(clipGroup, trackNum, trackKind): return flatB -# make tracks from timeline A def makeTrackA(clipGroup, trackNum, trackKind): - tSameV = makeTrack("same", trackKind, clipGroup.same) + """Make an annotated track from timeline A. Shows deleted clips and the original clips + corresponding to clips edited in timeline B.""" + tSame = makeTrack("same", trackKind, clipGroup.same) # grab the original pair from all the edit clipDatas prevEdited = [] prevMoved = [] for e in clipGroup.edit: prevEdited.append(e.matched_clipData) - tEditedV = makeTrack("edited", trackKind, prevEdited, "ORANGE") + tEdited = makeTrack("edited", trackKind, prevEdited, otio.core.Color.ORANGE) - tDelV = makeTrack("deleted", trackKind, clipGroup.delete, "PINK") + tDel = makeTrack("deleted", trackKind, clipGroup.delete, otio.core.Color.PINK) # TODO: explain the make sep then merge flatten tracks thing - flatA = otio.core.flatten_stack([tSameV, tEditedV, tDelV]) + flatA = otio.core.flatten_stack([tSame, tEdited, tDel]) # TODO: change video to directly use trackKind if trackKind == otio.schema.Track.Kind.Video: @@ -148,237 +137,4 @@ def makeTrackA(clipGroup, trackNum, trackKind): flatA.kind = trackKind - return flatA - -# def colorMovedA(tl, clipDB): -# # maybe make an extract all add/edit/move, etc from clipDB -# movedClips = [] -# for track in clipDB.keys(): -# movedClips.extend(clipDB[track]["move"]) - -# for m in movedClips: -# movedA = m.pair -# track = movedA.track_num - -# # find clip in new track that was created -# currentTrack = tl.tracks[track] -# clips = currentTrack.find_clips() -# if movedA.source in clips: -# print("found corresponding clip") -# # clipToColor = clips.index(movedA.source) - -# # print(clipToColor.name) - -# # tMovedV = makeTrack("moved", trackKind, prevMoved, "PURPLE", markersOn=True) - -def makeTimelineOfType(tlType, trackA, trackB, videoGroup, audioGroup=None): - newTl = None - - if tlType == "stack": - newTl = makeTimelineStack(trackA, trackB, videoGroup, audioGroup) - elif tlType == "inline": - newTl = makeTimelineInline(trackA, trackB, videoGroup, audioGroup) - elif tlType == "full": - newTl = makeTimelineFull(trackA, trackB, videoGroup, audioGroup) - elif tlType == "simple": - newTl = makeTimelineSimple(trackA, trackB, videoGroup, audioGroup) - else: - print("not a valid display type") - return newTl - -def makeTimelineStack(trackA, trackB, videoGroup, audioGroup=None): - # create new timeline with groups separated out into individual tracks - tl = otio.schema.Timeline(name="timeline") - - # append two original tracks - trackA.name = "Track A" + trackA.name - trackB.name = "Track B" + trackB.name - tl.tracks.append(copy.deepcopy(trackA)) - tl.tracks.append(copy.deepcopy(trackB)) - - tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE") - tSameV = makeTrack("same", "Video", 
videoGroup.same) - tDelV = makeTrack("deleted", "Video", videoGroup.delete, "RED") - - # append video tracks to timeline - tl.tracks.append(tDelV) - tl.tracks.append(tSameV) - tl.tracks.append(tEditedV) - tl.tracks.append(tAddV) - - # add audio tracks if present - if audioGroup is not None: - tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") - tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") - tSameA = makeTrack("same", "Audio", audioGroup.same) - tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") - - # append video tracks to timeline - tl.tracks.append(tAddA) - tl.tracks.append(tEditedA) - tl.tracks.append(tSameA) - tl.tracks.append(tDelA) - - return tl - -# note: flatten_stack doesn't work when there's transitions -def makeTimelineInline(trackA, trackB, clipGroup, audioGroup=None): - tl = otio.schema.Timeline(name="timeline") - - tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") - tSameV = makeTrack("same", "Video", clipGroup.same) - tDelV = makeTrack("deleted", "Video", clipGroup.delete, "RED") - - flat_videoA = otio.core.flatten_stack([copy.deepcopy(trackA), tDelV]) - flat_videoA.name = "VideoA" - tl.tracks.append(flat_videoA) - - flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) - flat_videoB.name = "VideoB" - tl.tracks.append(flat_videoB) - - # add audio tracks if present - if audioGroup is not None: - tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") - tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") - tSameA = makeTrack("same", "Audio", audioGroup.same) - tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") - - flat_audioA = otio.core.flatten_stack([tSameA, tDelA]) - flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) - - flat_audioA.name = "AudioA" - flat_audioB.name = "AudioB" - flat_audioA.kind = "Audio" - flat_audioB.kind = "Audio" - - # append audio tracks to timeline - tl.tracks.append(flat_audioB) - tl.tracks.append(flat_audioA) - - return tl - -def makeDeletes(tl, tracksOfDeletes): - for t in tracksOfDeletes: - tl.tracks.insert(0, t) - return tl - -# note: flatten_stack doesn't work when there's transitions -def makeTimelineSimple(trackA, trackB, clipGroup, audioGroup=None): - tl = otio.schema.Timeline(name="timeline") - - tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") - tSameV = makeTrack("same", "Video", clipGroup.same) - tDelV = makeTrack("deleted", "Video", clipGroup.delete, "PINK") - - tl.tracks.append(tDelV) - - flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) - flat_videoB.name = "VideoB" - tl.tracks.append(flat_videoB) - - # commented out for now - # # add audio tracks if present - # if audioGroup is not None: - # tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") - # tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") - # tSameA = makeTrack("same", "Audio", audioGroup.same) - # tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") - - # flat_audioA = otio.core.flatten_stack([tSameA, tDelA]) - # flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) - - # flat_audioA.name = "AudioA" - # flat_audioB.name = "AudioB" - # flat_audioA.kind = "Audio" - # flat_audioB.kind = "Audio" - - # # append audio tracks to timeline - # tl.tracks.append(flat_audioB) - # tl.tracks.append(flat_audioA) - - return tl - -def makeTimelineSplitDelete(trackA, trackB, 
clipGroup, audioGroup=None): - tl = otio.schema.Timeline(name="timeline") - - tAddV = makeTrack("added", "Video", clipGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", clipGroup.edit, "ORANGE") - tSameV = makeTrack("same", "Video", clipGroup.same) - tDelV = makeTrack("deleted", "Video", clipGroup.delete, "PINK") - - - for e in clipGroup.edit(): - pass - - - flat_videoB = otio.core.flatten_stack([tSameV, tEditedV, tAddV]) - flat_videoB.name = "VideoB" - tl.tracks.append(flat_videoB) - - # commented out for now - # # add audio tracks if present - # if audioGroup is not None: - # tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") - # tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") - # tSameA = makeTrack("same", "Audio", audioGroup.same) - # tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") - - # flat_audioA = otio.core.flatten_stack([tSameA, tDelA]) - # flat_audioB = otio.core.flatten_stack([tSameA, tEditedA, tAddA]) - - # flat_audioA.name = "AudioA" - # flat_audioB.name = "AudioB" - # flat_audioA.kind = "Audio" - # flat_audioB.kind = "Audio" - - # # append audio tracks to timeline - # tl.tracks.append(flat_audioB) - # tl.tracks.append(flat_audioA) - - return tl, tDelV - -def makeTimelineFull(trackA, trackB, videoGroup, audioGroup=None): - tl = otio.schema.Timeline(name="timeline") - - tlFlat = makeTimelineInline(trackA, trackB, videoGroup, audioGroup) - - tAddV = makeTrack("added", "Video", videoGroup.add, "GREEN") - tEditedV = makeTrack("edited", "Video", videoGroup.edit, "ORANGE") - tDelV = makeTrack("deleted", "Video", videoGroup.delete, "RED") - - # append video tracks to timeline - tl.tracks.append(tDelV) - - # temp testing - tl.tracks.append(copy.deepcopy(trackA)) - tl.tracks.append(copy.deepcopy(trackB)) - - # temp comment - # tlFlatVid = tlFlat.video_tracks() - # for v in tlFlatVid: - # tl.tracks.append(copy.deepcopy(v)) - - tl.tracks.append(tEditedV) - tl.tracks.append(tAddV) - - # add audio tracks if present - if audioGroup is not None: - tAddA = makeTrack("added", "Audio", audioGroup.add, "GREEN") - tEditedA = makeTrack("edited", "Audio", audioGroup.edit, "ORANGE") - tDelA = makeTrack("deleted", "Audio", audioGroup.delete, "RED") - - # append video tracks to timeline - tl.tracks.append(tAddA) - tl.tracks.append(tEditedA) - - tlFlatAud = tlFlat.audio_tracks() - for a in tlFlatAud: - tl.tracks.append(copy.deepcopy(a)) - - tl.tracks.append(tDelA) - - return tl + return flatA \ No newline at end of file From e7972f410e5dcd468fe3f38e2a7909fb38d34d40 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Fri, 15 Aug 2025 18:12:55 -0700 Subject: [PATCH 22/30] made color an optional parameter in addMarker and added examples for making tracks for A and B Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 2 +- .../console/otiodiff/makeOtio.py | 91 +++++++++++-------- 2 files changed, 54 insertions(+), 39 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 1033f5e2a..41428e375 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -78,7 +78,7 @@ def checkSame(self, cA): # Note: check in relation to left and right? 
# know if moved in seq rather than everything shifted over because of lengthen/shorten of other clips isSame = True - self.note = "moved" + self.note = "shifted laterally in track" else: # print("source range different", cA.name, self.name) # print(self.media_ref) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index 12b9cef47..a251f549d 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -4,39 +4,29 @@ def sortClips(trackClips): """Sort ClipDatas based on start time on the timeline""" - # sort by clip start time in timeline return sorted(trackClips, key=lambda clipData: clipData.timeline_range.start_time.value) def addRavenColor(clip, color): """Add color of clip to metadata of raven so clips are correctly color-coded in raven viewer. Specific to raven only.""" - # parses name of color from otio.core.Color and puts into format that raven can read color = color.name.upper() - # TODO: if raven not in metadata, add empty dict - - if "raven" in clip.metadata: - clip.metadata["raven"]["color"] = color - else: - colorData = {"color" : color} - clip.metadata["raven"] = colorData + if "raven" not in clip.metadata: + clip.metadata["raven"] = {"color" : None} + clip.metadata["raven"]["color"] = color return clip -def addMarker(newClip, color, clipData): +def addMarker(newClip, clipData, color=None): """Add marker of specified color and name to clip""" newMarker = otio.schema.Marker() newMarker.marked_range = clipData.source_range # parses name of color from otio.core.Color and puts into format that markers can read - colorName = color.name.upper() - newMarker.color = colorName - - if(colorName == "GREEN"): - newMarker.name = "added" - elif(colorName == "PINK"): - newMarker.name = "deleted" + if color is not None: + colorName = color.name.upper() + newMarker.color = colorName if isinstance(clipData, ClipData) and clipData.note is not None: # print("edit note added") @@ -46,14 +36,15 @@ def addMarker(newClip, color, clipData): return newClip +# TODO: make variables for add, edit, delete, move colors? + def makeSeparaterTrack(trackType): """Make empty track that separates the timeline A tracks from the timeline B tracks""" return otio.schema.Track(name="=====================", kind=trackType) def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): """Make OTIO track from ClipDatas with option to add markers and color to all clips on track""" - # make new blank track with name of kind - # print("make track of kind: ", trackKind) + # make new blank track with name and kind from parameters track = otio.schema.Track(name=trackName, kind=trackKind) # sort clips by start time in timeline @@ -87,54 +78,78 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) newClip = addRavenColor(newClip, clipColor) newClip.color = clipColor - # TODO: move out of if and make clipColor optional with default color - if markersOn: - newClip = addMarker(newClip, clipColor, clipData) + if markersOn: + newClip = addMarker(newClip, clipData, clipColor) track.append(newClip) return track def makeTrackB(clipGroup, trackNum, trackKind): """Make an annotated track from timeline B. Shows added and edited clips as well as - clips that have moved between tracks.""" + clips that have moved between tracks. 
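For reference, a minimal sketch of the marker annotation addMarker produces (the clip name, rate, and note text below are hypothetical; OTIO marker colors are plain strings):

import opentimelineio as otio

clip = otio.schema.Clip(
    name="shotA v2",
    source_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(0, 24),
        otio.opentime.RationalTime(48, 24)))
marker = otio.schema.Marker()
marker.marked_range = clip.source_range    # span the marker highlights
marker.color = "ORANGE"                    # edited clips are orange in this tool
marker.name = "trimmed tail by 10 frames"  # note text filled in by checkEdited
clip.markers.append(marker)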
+
+ Algorithm makes individual tracks for each clip category the track contains,
+ then flattens them to form the final track. Since blanks are left in all of the individual tracks,
+ flattening should allow all clips to simply slot down into place on the flattened track
+
+ Ex. track 1 has added and unchanged clips
+ Algorithm steps:
+ 1) Make a track containing only the unchanged clips of track 1
+ 2) Make another track containing only the added clips of track 1 and color them green
+ 3) Flatten the added clips track on top of the unchanged clips track to
+ create a track containing both
+ """
+
+ # for each category of clips, make an individual track and color code accordingly
+ tSame = makeTrack("same", trackKind, clipGroup.same)
 tAdd = makeTrack("added", trackKind, clipGroup.add, otio.core.Color.GREEN)
 tEdited = makeTrack("edited", trackKind, clipGroup.edit, otio.core.Color.ORANGE, markersOn=True)
- tSame = makeTrack("same", trackKind, clipGroup.same)
 tMoved = makeTrack("moved", trackKind, clipGroup.move, otio.core.Color.PURPLE, markersOn=True)

+ # put all the tracks into a list and flatten them down to a single track
+ # that contains all the color-coded clips
 flatB = otio.core.flatten_stack([tSame, tEdited, tAdd, tMoved])
+
+ # update track name and kind
 if trackKind == otio.schema.Track.Kind.Video:
- flatB.name = "Video B" + str(trackNum)
+ flatB.name = trackKind + " B" + str(trackNum)
 elif trackKind == otio.schema.Track.Kind.Audio:
- flatB.name = "Audio B" + str(trackNum)
-
+ flatB.name = trackKind + " B" + str(trackNum)
 flatB.kind = trackKind

 return flatB

+
 def makeTrackA(clipGroup, trackNum, trackKind):
 """Make an annotated track from timeline A. Shows deleted clips and the original clips
- corresponding to clips edited in timeline B."""
+ corresponding to clips edited in timeline B.
+
+ Algorithm makes individual tracks for each clip category the track contains,
+ then flattens them to form the final track. Since blanks are left in all of
+ the individual tracks, flattening should allow all clips to simply slot down
+ into place on the flattened track
+
+ Ex. 
track 1 has deleted and unchanged clips
+ Algorithm steps:
+ 1) Make a track containing only the unchanged clips of track 1
+ 2) Make another track containing only the deleted clips of track 1 and color them red
+ 3) Flatten the deleted clips track on top of the unchanged clips track to
+ create a track containing both
+ """
+
+ # for each category of clips, make an individual track and color code accordingly
 tSame = makeTrack("same", trackKind, clipGroup.same)

 # grab the original pair from all the edit clipDatas
 prevEdited = []
- prevMoved = []
 for e in clipGroup.edit:
 prevEdited.append(e.matched_clipData)
- tEdited = makeTrack("edited", trackKind, prevEdited, otio.core.Color.ORANGE)
-
+ tEdited = makeTrack("edited", trackKind, prevEdited, otio.core.Color.ORANGE)
 tDel = makeTrack("deleted", trackKind, clipGroup.delete, otio.core.Color.PINK)
-
- # TODO: explain the make sep then merge flatten tracks thing
+
+ # put all the tracks into a list and flatten them down to a single track that contains all the color-coded clips
 flatA = otio.core.flatten_stack([tSame, tEdited, tDel])

- # TODO: change video to directly use trackKind
+ # update track name and kind
 if trackKind == otio.schema.Track.Kind.Video:
- flatA.name = "Video A" + str(trackNum)
+ flatA.name = trackKind + " A" + str(trackNum)
 elif trackKind == otio.schema.Track.Kind.Audio:
- flatA.name = "Audio A" + str(trackNum)
-
+ flatA.name = trackKind + " A" + str(trackNum)
 flatA.kind = trackKind

 return flatA
\ No newline at end of file
From 7e862e6050b12915b4b9848fcb5c81eddbeb4978 Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Mon, 18 Aug 2025 11:13:58 -0700
Subject: [PATCH 23/30] added edit tests

Signed-off-by: Yingjie Wang
---
 .../console/otiodiff/clipData.py              |   2 +-
 tests/test_otiodiff.py                        | 148 ++++++++++++++----
 2 files changed, 121 insertions(+), 29 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
index 41428e375..1bb475dd9 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
@@ -132,7 +132,7 @@ def checkEdited(self, cA):

 # clip duration longer
 elif(selfDur.value > cADur.value):
- self.note = "lengthened"
+ self.note = "lengthened " + deltaFramesStr + " frames"

 if(selfSourceStart.value == cASourceStart.value):
 self.note = "lengthened tail by " + deltaFramesStr + " frames"
diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py
index d70f9467b..676d9f291 100644
--- a/tests/test_otiodiff.py
+++ b/tests/test_otiodiff.py
@@ -212,7 +212,7 @@ def test_check_same_if_move(self):
 clipDataB = ClipData(clipB, 1)

 assert clipDataB.checkSame(clipDataA)
- assert clipDataB.note == "moved"
+ assert clipDataB.note == "shifted laterally in track"

 def test_check_not_same(self):
 # check that two clips with different names are not the same
@@ -315,8 +315,8 @@ def test_check_not_same3(self):

 assert not clipDataB.checkSame(clipDataA)
 assert clipDataB.note is None
-
- def test_check_Edited(self):
+
+ def test_check_edited_trimmed_head(self):
 # check for trim head/tail and lengthen head/tail
 clipA = otio.schema.Clip(
 name = "testName testTake",
 media_reference = otio.core.MediaReference(
 available_range=otio.opentime.TimeRange(
 otio.opentime.RationalTime(0, 24),
 otio.opentime.RationalTime(100, 24))),
 source_range = otio.opentime.TimeRange(
 otio.opentime.RationalTime(0, 24),
 otio.opentime.RationalTime(100, 24)),
 )
 clipB = otio.schema.Clip(
 name = "testName testTake",
 media_reference = otio.core.MediaReference(
 available_range=otio.opentime.TimeRange(
- otio.opentime.RationalTime(10, 24),
- otio.opentime.RationalTime(90, 24))),
+ otio.opentime.RationalTime(0, 24),
+ otio.opentime.RationalTime(100, 
24))), source_range = otio.opentime.TimeRange( otio.opentime.RationalTime(10, 24), otio.opentime.RationalTime(90, 24)), @@ -346,9 +346,115 @@ def test_check_Edited(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) + + + assert clipDataB.checkEdited(clipDataA) + print("note is:", clipDataB.note) + assert clipDataB.note == "trimmed head by 10 frames" + + def test_check_edited_trimmed_tail(self): + # check for trim head/tail and lengthen head/tail + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(90, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + + assert clipDataB.checkEdited(clipDataA) + assert clipDataB.note == "trimmed tail by 10 frames" + + def test_check_edited_lengthened_head(self): + # check for trim head/tail and lengthen head/tail + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) + + assert clipDataB.checkEdited(clipDataA) + print("note:", clipDataB.note) + assert clipDataB.note == "lengthened head by 10 frames" + + def test_check_edited_lengthened_tail(self): + # check for trim head/tail and lengthen head/tail + clipA = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), + ) + clipB = otio.schema.Clip( + name = "testName testTake", + media_reference = otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range = otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), + ) + trackA = otio.schema.Track() + trackB = otio.schema.Track() + + trackA.append(clipA) + trackB.append(clipB) + + clipDataA = ClipData(clipA, 1) + clipDataB = ClipData(clipB, 1) assert 
clipDataB.checkEdited(clipDataA) - assert clipDataB.note == "trimmed 10 frames" + assert clipDataB.note == "lengthened tail by 10 frames" class TestGetDif(unittest.TestCase): def test_find_clones(self): @@ -593,28 +699,14 @@ def test_sort_clones_clones_in_one_single_in_other(self): assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" -# class TestMakeOtio(unittest.TestCase): -# # Test the type parameter to makeTimelineOfType, but not the detailed results. -# def test_make_timeline_type(self): -# # SETUP -# trackA = otio.schema.Track() -# trackB = otio.schema.Track() -# pass - - # SortedClipDatas = namedtuple('VideoGroup', ['add', 'edit', 'same', 'delete']) - # videoGroup = SortedClipDatas([], [], [], []) - - # # EXERCISE - # tlStack = makeOtio.makeTimelineOfType("stack", trackA, trackB, videoGroup) - # tlInline = makeOtio.makeTimelineOfType("inline", trackA, trackB, videoGroup) - # tlFull = makeOtio.makeTimelineOfType("full", trackA, trackB, videoGroup) - # bogus = makeOtio.makeTimelineOfType("bogus", trackA, trackB, videoGroup) - - # # VERIFY - # assert(len(tlStack.tracks) == 6), "Number of tracks for stack display mode not matched" - # assert(len(tlInline.tracks) == 2), "Number of tracks for inline display mode not matched" - # assert(len(tlFull.tracks) == 5), "Number of tracks for full display mode not matched" - # assert(bogus is None), "Should have been invalid result" + # TODO: test case for timelines with unmatched track nums + # test case for timeline with matched track nums + +class TestMakeOtio(unittest.TestCase): + # TODO: test sort clips + + # test make track + pass if __name__ == '__main__': From c1bf13b45e84ccb6ffd51e8345ba729b228e0c9c Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Mon, 18 Aug 2025 15:40:25 -0700 Subject: [PATCH 24/30] added variables to specify color-coding, and updated formatting Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 65 +- .../console/otiodiff/getDiff.py | 208 ++++-- .../console/otiodiff/makeOtio.py | 98 ++- .../opentimelineio/console/otiotool.py | 22 +- tests/test_otiodiff.py | 706 +++++++++--------- 5 files changed, 607 insertions(+), 492 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 1bb475dd9..857bc3207 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -2,7 +2,10 @@ # TODO: clip comparable??? ClipInfo # source clip or clip ref? 
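As a usage sketch for the class below, mirroring how the tests above build their fixtures (the clip name is hypothetical, and the clip is parented to a track the same way the tests do it):

import opentimelineio as otio
from opentimelineio.console.otiodiff.clipData import ClipData

clip = otio.schema.Clip(
    name="shotA take1",
    source_range=otio.opentime.TimeRange(
        otio.opentime.RationalTime(0, 24),
        otio.opentime.RationalTime(100, 24)))
track = otio.schema.Track()
track.append(clip)  # parent the clip so its timeline range can be computed

cd = ClipData(clip, 1)  # track_num=1; checkSame/checkEdited fill in cd.note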
-# full name = name + version, name is just name, add ex, split on space, b4 is name, after is version
+# full name = name + version, name is just name,
+# add ex, split on space, b4 is name, after is version
+
+
 class ClipData:
 full_name = ""
 name = ""
@@ -10,7 +13,7 @@
 media_ref = None
 source_range = otio.opentime.TimeRange()
 timeline_range = otio.opentime.TimeRange()
- track_num = None # not originally stored in otio.schema.Clip
+ track_num = None  # not originally stored in otio.schema.Clip
 source_clip = otio.schema.Clip()
 # everything below holds comparison result info
 note = ""
@@ -27,9 +30,9 @@
 self.source_clip = source_clip
 self.note = note

-
 # split full name into name of clip and version by white space
 # uses structure of "clipA v1" where clipA is the name and v1 is the version
+
 def splitFullName(self, clip):
 shortName = clip.name.split(" ")[0]
 version = clip.name.split(" ")[1] if len(clip.name.split(" ")) > 1 else None
@@ -40,19 +43,21 @@
 print("name: ", self.name)
 print("version: ", self.version)
 print("media ref: ", self.media_ref)
- print("source start time: ", self.source_range.start_time.value, " duration: ", self.source_range.duration.value)
- print("timeline start time:", self.timeline_range.start_time.value, " duration: ", self.timeline_range.duration.value)
- if(self.note != ""):
+ print("source start time: ", self.source_range.start_time.value,
+ " duration: ", self.source_range.duration.value)
+ print("timeline start time:", self.timeline_range.start_time.value,
+ " duration: ", self.timeline_range.duration.value)
+ if (self.note != ""):
 print("note: ", self.note)
 print("source clip: ", self.source_clip.name)

 # compare truncated names
 def sameName(self, cA):
- if(self.name.lower() == cA.name.lower()):
+ if (self.name.lower() == cA.name.lower()):
 return True
 else:
 return False
-
+
 # note: local and source duration should always match, can assume same
 # compare the duration within the timeline for 2 clips
 def sameDuration(self, cA):
@@ -66,17 +71,19 @@
 if self.sameName(cA):
 # check source range is same
 # TODO: call trimmed range instead of source range ???
- # TODO: make test where has null source range -> see things break, then go back and change <- low priority
+ # TODO: make test where has null source range -> see things break,
+ # then go back and change <- low priority
 if (self.source_range == cA.source_range):
 # print(self.name, " ", self.timeline_range, " ", cA.timeline_range)
 # check in same place on timeline
 if (self.timeline_range == cA.timeline_range):
 isSame = True
 # check duration is same but not necessarily in same place on timeline
 # TODO: change to else? (does the elif always run?)
- elif(self.sameDuration(cA)):
+ elif (self.sameDuration(cA)):
 # Note: check in relation to left and right? 
- # know if moved in seq rather than everything shifted over because of lengthen/shorten of other clips
+ # know if moved in seq rather than everything shifted over
+ # because of lengthen/shorten of other clips
 isSame = True
 self.note = "shifted laterally in track"
 else:
 # print("source range different", cA.name, self.name)
 # print(self.media_ref)
 pass

 return isSame
-
- # compare 2 clips and see if they have been
+
+ # compare 2 clips and see if they have been edited
 # compare self: "new", to old
 def checkEdited(self, cA):
 isEdited = False

 # Note: assumption that source range and timeline range duration always equal
- # assert(self.source_range.duration.value == self.timeline_range.duration.value), "clip source range and timeline range durations don't match"
- # assert(cA.source_range.duration.value == cA.timeline_range.duration.value), "clip source range and timeline range durations don't match"
+ # assert(self.source_range.duration.value == self.timeline_range.duration.value
+ # ), "clip source range and timeline range durations don't match"
+ # assert(cA.source_range.duration.value == cA.timeline_range.duration.value
+ # ), "clip source range and timeline range durations don't match"

 selfDur = self.source_range.duration
 cADur = cA.source_range.duration
@@ -109,34 +118,34 @@
 # # self.printData()
 # # cA.printData()
 # self.note = "source range start times differ"
- # isEdited = True
+ # isEdited = True

- if(self.source_range != cA.source_range):
+ if (self.source_range != cA.source_range):
 self.note = "source range changed"
 isEdited = True

 deltaFramesStr = str(abs(selfDur.to_frames() - cADur.to_frames()))

- if(selfDur.value == cADur.value):
+ if (selfDur.value == cADur.value):
 self.note = "start time in source range changed"
 # put note assignment into function, return note?
 # self, other, olderClipData rather than cA

 # clip duration shorter
- elif(selfDur.value < cADur.value):
+ elif (selfDur.value < cADur.value):
 self.note = "trimmed " + deltaFramesStr + " frames"
-
- if(selfSourceStart.value == cASourceStart.value):
+
+ if (selfSourceStart.value == cASourceStart.value):
 self.note = "trimmed tail by " + deltaFramesStr + " frames"
- elif(selfSourceStart.value < cASourceStart.value):
+ # a trimmed head means the new clip starts later in the source media
+ elif (selfSourceStart.value > cASourceStart.value):
 self.note = "trimmed head by " + deltaFramesStr + " frames"

 # clip duration longer
- elif(selfDur.value > cADur.value):
+ elif (selfDur.value > cADur.value):
 self.note = "lengthened " + deltaFramesStr + " frames"

- if(selfSourceStart.value == cASourceStart.value):
+ if (selfSourceStart.value == cASourceStart.value):
 self.note = "lengthened tail by " + deltaFramesStr + " frames"
- elif(selfSourceStart.value > cASourceStart.value):
+ # a lengthened head means the new clip starts earlier in the source media
+ elif (selfSourceStart.value < cASourceStart.value):
 self.note = "lengthened head by " + deltaFramesStr + " frames"

- return isEdited
\ No newline at end of file
+ return isEdited
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
index 80ab8b895..b0445c069 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -1,5 +1,3 @@
-import argparse
-import os
 import copy
 from collections import namedtuple

@@ -8,17 +6,23 @@
 from .clipData import ClipData
 from . import makeOtio

+
 def diffTimelines(timelineA, timelineB):
- '''Diff two OTIO timelines and identify how clips on video and/or audio tracks changed from timeline A to timeline B. 
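A minimal sketch of driving this entry point directly from Python (the input paths are hypothetical; diffTimelines prints the text summary as a side effect and returns the annotated timeline):

import opentimelineio as otio
from opentimelineio.console.otiodiff import getDiff

tlA = otio.adapters.read_from_file("cut_v1.otio")  # hypothetical paths
tlB = otio.adapters.read_from_file("cut_v2.otio")
diffed = getDiff.diffTimelines(tlA, tlB)
otio.adapters.write_to_file(diffed, "diffed.otio")  # annotated result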
- Return an annotated otio file with the differences and print a text summary to console.
- 
+ '''Diff two OTIO timelines and identify how clips on video and/or audio tracks
+ changed from timeline A to timeline B.
+ Return an annotated otio file with the differences and print a text summary
+ to console.
+ 
 Parameters:
- timelineA (otio.schema.Timeline()): timeline from the file you want to compare against, ex. clip1 version 1
- timelineB (otio.schema.Timeline()): timeline from the file you want to compare, ex. clip1 version 2
- 
+ timelineA (otio.schema.Timeline()): timeline from the file you want to
+ compare against, ex. clip1 version 1
+ timelineB (otio.schema.Timeline()): timeline from the file you want to
+ compare, ex. clip1 version 2
+ 
 Returns:
- outputTimeline (otio.schema.Timeline()): timeline with color coded clips and marker annotations showing the
- differences between the input tracks with the tracks from timeline B stacked on top of timeline A
+ outputTimeline (otio.schema.Timeline()): timeline with color coded clips
+ and marker annotations showing the differences between the input tracks
+ with the tracks from timeline B stacked on top of timeline A
 '''
 hasVideo = False
 hasAudio = False
@@ -39,8 +43,10 @@ def diffTimelines(timelineA, timelineB):
 outputTimeline = otio.schema.Timeline()
 # process video tracks, audio tracks, or both
 if hasVideo and hasAudio:
- videoClipTable = categorizeClipsByTracks(timelineA.video_tracks(), timelineB.video_tracks())
- audioClipTable = categorizeClipsByTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
+ videoClipTable = categorizeClipsByTracks(
+ timelineA.video_tracks(), timelineB.video_tracks())
+ audioClipTable = categorizeClipsByTracks(
+ timelineA.audio_tracks(), timelineB.audio_tracks())

 makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "perTrack")
 makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary")
@@ -50,14 +56,16 @@
 # combine
 for t in videoTl.tracks:
 outputTimeline.tracks.append(copy.deepcopy(t))
- 
+ 
 elif hasVideo:
- videoClipTable = categorizeClipsByTracks(timelineA.video_tracks(), timelineB.video_tracks())
+ videoClipTable = categorizeClipsByTracks(
+ timelineA.video_tracks(), timelineB.video_tracks())
 makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary")
 outputTimeline = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)

 elif hasAudio:
- audioClipTable = categorizeClipsByTracks(timelineA.audio_tracks(), timelineB.audio_tracks())
+ audioClipTable = categorizeClipsByTracks(
+ timelineA.audio_tracks(), timelineB.audio_tracks())
 makeSummary(audioClipTable, "Audio", "summary")
 outputTimeline = makeNewOtio(audioClipTable, otio.schema.Track.Kind.Audio)

@@ -73,9 +81,12 @@ def diffTimelines(timelineA, timelineB):
 return outputTimeline

 # TODO: make nonClones a set rather than a list
+
+
 def findClones(clips):
- """Separate the cloned ClipDatas (ones that share the same name) from the unique ClipDatas and return both
- 
+ """Separate the cloned ClipDatas (ones that share the same name) from the
+ unique ClipDatas and return both
+ 
 Parameters:
 clips (list of ClipDatas): list of ClipDatas

@@ -83,7 +94,7 @@
 clones (dictionary): dictionary of all clones in the group of ClipDatas
 keys: name of clone
 values: list of ClipDatas of that name
- nonClones (list): list of unique clones in group of ClipDatas\
+ nonClones (list): list of unique clips in the group of ClipDatas
 """

 clones = {}
@@ -92,7 +103,7 @@
 names = []

 for c in 
clips: names.append(c.name) - + for c in clips: if c.name in clones: clones[c.name].append(c) @@ -103,8 +114,10 @@ def findClones(clips): return clones, nonClones + def sortClones(clipDatasA, clipDatasB): - """Identify cloned ClipDatas (ones that share the same name) across two groups of ClipDatas and separate from the unique + """Identify cloned ClipDatas (ones that share the same name) across two + groups of ClipDatas and separate from the unique ClipDatas (ones that only appear once in each group)""" # find cloned clips and separate out from unique clips clonesA, nonClonesA = findClones(clipDatasA) @@ -128,8 +141,10 @@ def sortClones(clipDatasA, clipDatasB): # clipCountB = 0 return (clonesA, nonClonesA), (clonesB, nonClonesB) + def compareClones(clonesA, clonesB): - """Compare two groups of cloned ClipDatas and categorize into added, unchanged, or deleted""" + """Compare two groups of cloned ClipDatas and categorize into + added, unchanged, or deleted""" added = [] unchanged = [] deleted = [] @@ -141,9 +156,12 @@ def compareClones(clonesA, clonesB): if nameB not in clonesA: added.extend(clonesB[nameB]) - # name matched, there exists clones in both A and B, check if there are same clips - # technically can be the first one is "edited" and the rest are "added"/"deleted" -> depends on how want to define - # currently, all clones that aren't the exact same get categorized as either "added" or "deleted" + # name matched, there exists clones in both A and B, check if there are + # same clips + # technically can be the first one is "edited" and the rest are + # "added"/"deleted" -> depends on how want to define + # currently, all clones that aren't the exact same get categorized as \ + # either "added" or "deleted" else: clipsA = clonesA[nameB] clipsB = clonesB[nameB] @@ -151,12 +169,12 @@ def compareClones(clonesA, clonesB): for clipB in clipsB: for clipA in clipsA: isSame = clipB.checkSame(clipA) - if(isSame): + if (isSame): unchanged.append(clipB) else: - if(clipB not in added): + if (clipB not in added): added.append(clipB) - if(clipA not in deleted): + if (clipA not in deleted): deleted.append(clipA) # same as above for deleted clips @@ -165,11 +183,13 @@ def compareClones(clonesA, clonesB): deleted.extend(clonesA[nameA]) # print("from clones added: ", len(added), " deleted: ", len(deleted)) - + return added, unchanged, deleted + def compareClips(clipDatasA, clipDatasB): - """Compare two groups of unique ClipDatas and categorize into added, edited, unchanged, and deleted""" + """Compare two groups of unique ClipDatas and categorize into + added, edited, unchanged, and deleted""" namesA = {} namesB = {} @@ -184,18 +204,18 @@ def compareClips(clipDatasA, clipDatasB): namesB[c.name] = c for cB in clipDatasB: - + if cB.name not in namesA: added.append(cB) else: cB.matched_clipData = namesA[cB.name] isSame = cB.checkSame(cB.matched_clipData) - if(isSame): + if (isSame): # cB.pair = namesA[cB.name] unchanged.append(cB) else: isEdited = cB.checkEdited(cB.matched_clipData) - if(isEdited): + if (isEdited): # cB.matched_clipData = namesA[cB.name] edited.append(cB) else: @@ -214,8 +234,10 @@ def compareClips(clipDatasA, clipDatasB): # TODO: some can be sets instead of lists return added, edited, unchanged, deleted + def compareTracks(trackA, trackB, trackNum): - """Compare clipis in two OTIO tracks and categorize into added, edited, same, and deleted""" + """Compare clipis in two OTIO tracks and categorize into + added, edited, same, and deleted""" clipDatasA = [] clipDatasB = [] @@ -223,7 +245,7 @@ 
def compareTracks(trackA, trackB, trackNum):
- """Compare clipis in two OTIO tracks and categorize into added, edited, same, and deleted"""
+ """Compare clips in two OTIO tracks and categorize into
+ added, edited, same, and deleted"""
 clipDatasA = []
 clipDatasB = []

 for c in trackA.find_clips():
 # put clip info into ClipData
 cd = ClipData(c, trackNum)
 clipDatasA.append(cd)
-
+
 for c in trackB.find_clips():
 # put clip info into ClipData
 cd = ClipData(c, trackNum)
@@ -236,7 +258,7 @@
 edited = []
 unchanged = []
 deleted = []
-
+
 # compare and categorize unique clips
 added, edited, unchanged, deleted = compareClips(nonClonesA, nonClonesB)

@@ -249,13 +271,16 @@
 return added, edited, unchanged, deleted

 # TODO? account for move edit, currently only identifies strictly moved
+
+
 def checkMoved(allDel, allAdd):
 """Identify ClipDatas that have moved between different tracks"""

 # ones found as same = moved
 # ones found as edited = moved and edited

 # wanted to compare full names to account for dif dep/take
- # otherwise shotA (layout123) and shotA (anim123) would count as a move and not as add
+ # otherwise shotA (layout123) and shotA (anim123) would count as a move and
+ # not as add
 # TODO: maybe preserve full name and also clip name, ex. id and name
 # TODO: fix compareClips so that it allows check by full name
 for c in allDel:
@@ -265,7 +290,8 @@
 newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd)

 # removes clips that are moved in same track, just keep clips moved between tracks
- moved = [clip for clip in moved if clip.track_num != clip.matched_clipData.track_num]
+ moved = [clip for clip in moved if clip.track_num !=
+ clip.matched_clipData.track_num]
 for clip in moved:
 clip.note = "Moved from track: " + str(clip.matched_clipData.track_num)
 # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num)
@@ -276,8 +302,10 @@
 return newAdd, moveEdit, moved, newDel

+
 def sortMoved(clipTable):
- """Put ClipDatas that have moved between tracks into their own category and remove from their previous category"""
+ """Put ClipDatas that have moved between tracks into their own category and
+ remove from their previous category"""
 allAdd = []
 allEdit = []
 allSame = []
@@ -299,25 +327,27 @@
 add, moveEdit, moved, delete = checkMoved(allDel, allAdd)

- # currently moved clips are still marked as delete in timelineA
+ # currently moved clips are still marked as delete in timelineA
 for cd in moved:
 clipTable[cd.track_num]["add"].remove(cd)
 clipTable[cd.track_num]["move"].append(cd)
 # clipTable[cd.track_num]["delete"].remove(cd)
 # clipTable[cd.pair.track_num]["moved"].append(cd.pair)
-
+
 return clipTable

+
 def makeNewOtio(clipTable, trackType):
- """Make a new annotated OTIO timeline showing the change from timeline A to timeline B, with the tracks
- from timeline B stacked on top of the tracks from timeline A
-
+ """Make a new annotated OTIO timeline showing the change from timeline A to
+ timeline B, with the tracks from timeline B stacked on top of
+ the tracks from timeline A
+
 Ex. 
New timeline showing the differences of timeline A and B with 2 tracks each Track 2B Track 1B ======== Track 2A - Track 1A + Track 1A """ newTl = otio.schema.Timeline(name="diffed") tracksInA = [] @@ -326,29 +356,35 @@ def makeNewOtio(clipTable, trackType): # make tracks A and B in output timeline for trackNum in clipTable.keys(): # use named tuple here since clip categories won't change anymore - SortedClipDatas = namedtuple('ClipGroup', ['add', 'edit', 'same', 'delete', 'move']) - clipGroup = SortedClipDatas(clipTable[trackNum]["add"], clipTable[trackNum]["edit"], clipTable[trackNum]["same"], clipTable[trackNum]["delete"], clipTable[trackNum]["move"]) + SortedClipDatas = namedtuple( + 'ClipGroup', ['add', 'edit', 'same', 'delete', 'move']) + clipGroup = SortedClipDatas(clipTable[trackNum]["add"], + clipTable[trackNum]["edit"], + clipTable[trackNum]["same"], + clipTable[trackNum]["delete"], + clipTable[trackNum]["move"]) newTrackA = makeOtio.makeTrackA(clipGroup, trackNum, trackType) - tracksInA.append(newTrackA) + tracksInA.append(newTrackA) newTrackB = makeOtio.makeTrackB(clipGroup, trackNum, trackType) tracksInB.append(newTrackB) - # write order to output timeline so that timeline B is on top for both video and audio + # write order to output timeline so that timeline B is on top for both + # video and audio if trackType == otio.schema.Track.Kind.Video: newTl.tracks.extend(tracksInA) newEmpty = makeOtio.makeSeparaterTrack(trackType) newTl.tracks.append(newEmpty) - + newTl.tracks.extend(tracksInB) elif trackType == otio.schema.Track.Kind.Audio: newTl.tracks.extend(tracksInB) newEmpty = makeOtio.makeSeparaterTrack(trackType) newTl.tracks.append(newEmpty) - + newTl.tracks.extend(tracksInA) # makeOtio.colorMovedA(newTl, clipTable) @@ -357,22 +393,27 @@ def makeNewOtio(clipTable, trackType): # TODO: rename to create bucket/cat/db/stuff; categorizeClipsByTracks + comment + def categorizeClipsByTracks(tracksA, tracksB): - """Compare the clips in each track in tracksB against the corresponding track in tracksA - and categorize based on how they have changed. Return a dictionary table of ClipDatas - categorized by added, edited, unchanged, deleted, and moved and ordered by track. - + """Compare the clips in each track in tracksB against the corresponding track + in tracksA and categorize based on how they have changed. + Return a dictionary table of ClipDatas categorized by + added, edited, unchanged, deleted, and moved and ordered by track. 
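The per-track bucket handed to makeTrackA and makeTrackB is a plain named tuple; a minimal sketch of its shape (each field holds a list of ClipData, here left empty):

from collections import namedtuple

ClipGroup = namedtuple('ClipGroup', ['add', 'edit', 'same', 'delete', 'move'])
empty_group = ClipGroup(add=[], edit=[], same=[], delete=[], move=[])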
+ Parameters: tracksA (list of otio.schema.Track() elements): list of tracks from timeline A tracksB (list of otio.schema.Track() elements): list of tracks from timeline B Returns: - clipTable (dictionary): dictionary holding categorized ClipDatas, organized by the track number of the ClipDatas + clipTable (dictionary): dictionary holding categorized ClipDatas, organized + by the track number of the ClipDatas dictionary keys: track number (int) - dictionary values: dictionary holding categorized ClipDatas of that track + dictionary values: dictionary holding categorized + ClipDatas of that track nested dictionary keys: category name (string) - nested dictionary values: list of ClipDatas that fall into the category - + nested dictionary values: list of ClipDatas that fall + into the category + ex: clipTable when tracksA and tracksB contain 3 tracks {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} 2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} @@ -381,7 +422,7 @@ def categorizeClipsByTracks(tracksA, tracksB): clipTable = {} # TODO? ^change to class perhaps? low priority - + shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) @@ -416,7 +457,7 @@ def categorizeClipsByTracks(tracksA, tracksB): added.append(cd) clipTable[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} - + else: for i in range(len(shorterTlTracks), len(tracksA)): newTrack = tracksA[i] @@ -435,7 +476,8 @@ def categorizeClipsByTracks(tracksA, tracksB): # tracksInA, tracksInB = makeNewOtio(clipTable, trackType) return clipTable - + + def makeSummary(clipTable, trackType, mode): """Summarize what clips got changed and how they changed and print to console.""" @@ -454,11 +496,20 @@ def makeSummary(clipTable, trackType, mode): for track in clipTable.keys(): clipGroup = clipTable[track] - allAdd.extend(clipGroup["add"]) if "add" in clipGroup.keys() else print("no add") - allDel.extend(clipGroup["delete"]) if "delete" in clipGroup.keys() else print("no del") - allSame.extend(clipGroup["same"]) if "same" in clipGroup.keys() else print("no same") - allEdit.extend(clipGroup["edit"]) if "edit" in clipGroup.keys() else print("no edit") - allMove.extend(clipGroup["move"]) if "move" in clipGroup.keys() else print("no move") + allAdd.extend(clipGroup["add"] + ) if "add" in clipGroup.keys() else print("no add") + allDel.extend( + clipGroup["delete"]) if "delete" in clipGroup.keys() \ + else print("no del") + allSame.extend( + clipGroup["same"]) if "same" in clipGroup.keys() \ + else print("no same") + allEdit.extend( + clipGroup["edit"]) if "edit" in clipGroup.keys() \ + else print("no edit") + allMove.extend( + clipGroup["move"]) if "move" in clipGroup.keys() \ + else print("no move") print("total added:", len(allAdd)) print("total edited:", len(allEdit)) @@ -475,9 +526,11 @@ def makeSummary(clipTable, trackType, mode): print(cat.upper(), ":", len(clipGroup[cat])) if cat != "same": for i in clipGroup[cat]: - print(i.name + ": " + i.note) if i.note is not None else print(i.name) + print(i.name + ": " + i.note) if i.note is not None \ + else print(i.name) print("") + def makeTimelineSummary(timelineA, timelineB): """Summarize information about the two timelines compared and print to console.""" print("Comparing Timeline B:", timelineB.name, "vs") @@ -495,23 +548,28 @@ def makeTimelineSummary(timelineA, timelineB): print("No audio tracks in B") # compare overall file duration - if(timelineB.duration() > 
timelineA.duration()): + if (timelineB.duration() > timelineA.duration()): delta = timelineB.duration().to_seconds() - timelineA.duration().to_seconds() print(f"Timeline duration increased by {delta:.2f} seconds") - elif(timelineB.duration() < timelineA.duration()): + elif (timelineB.duration() < timelineA.duration()): delta = timelineA.duration().to_seconds() - timelineB.duration().to_seconds() - print(f"Timeline duration decreased by {delta:.2f} seconds") + print(f"Timeline duration decreased by {delta:.2f} seconds") else: print("Timeline duration did not change") - print("") + print("") + ''' ======= Notes ======= Test shot simple: - /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio + /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio + /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio Test seq matching edit's skywalker: - /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_ChangeNotes.Relinked.01.otio /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio + /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_ + ChangeNotes.Relinked.01.otio + /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio Test shot multitrack: - /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio -''' \ No newline at end of file + /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio + /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio +''' diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index a251f549d..bfab84a32 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -1,35 +1,47 @@ import opentimelineio as otio import copy from .clipData import ClipData +from opentimelineio.core import Color + +# color-coding for clips in output timeline +addedClipsColor = Color.GREEN +editedClipsColor = Color.ORANGE +deletedClipsColor = Color.PINK +movedClipsColor = Color.PURPLE + def sortClips(trackClips): """Sort ClipDatas based on start time on the timeline""" - return sorted(trackClips, key=lambda clipData: clipData.timeline_range.start_time.value) + return sorted(trackClips, + key=lambda clipData: clipData.timeline_range.start_time.value) + def addRavenColor(clip, color): - """Add color of clip to metadata of raven so clips are correctly color-coded in raven viewer. - Specific to raven only.""" - # parses name of color from otio.core.Color and puts into format that raven can read + """Add color of clip to metadata of raven so clips are correctly + color-coded in raven viewer. 
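What addRavenColor leaves behind is ordinary OTIO metadata; a sketch of the result for a clip flagged as added (the clip name is hypothetical, and the "raven" key is read only by the raven timeline viewer):

import opentimelineio as otio

clip = otio.schema.Clip(name="shotA v2")
clip.metadata["raven"] = {"color": "GREEN"}
# serializes as: "metadata": {"raven": {"color": "GREEN"}}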
Specific to raven only.""" + # parses name of color from otio.core.Color and puts into + # format that raven can read color = color.name.upper() if "raven" not in clip.metadata: - clip.metadata["raven"] = {"color" : None} + clip.metadata["raven"] = {"color": None} clip.metadata["raven"]["color"] = color - + return clip + def addMarker(newClip, clipData, color=None): """Add marker of specified color and name to clip""" newMarker = otio.schema.Marker() newMarker.marked_range = clipData.source_range - - # parses name of color from otio.core.Color and puts into format that markers can read + + # parses name of color from otio.core.Color and puts into + # format that markers can read if color is not None: colorName = color.name.upper() newMarker.color = colorName if isinstance(clipData, ClipData) and clipData.note is not None: - # print("edit note added") newMarker.name = clipData.note newClip.markers.append(newMarker) @@ -38,12 +50,16 @@ def addMarker(newClip, clipData, color=None): # TODO: make variables for add, edit, delete, move colors? + def makeSeparaterTrack(trackType): - """Make empty track that separates the timeline A tracks from the timeline B tracks""" + """Make empty track that separates the timeline A tracks + from the timeline B tracks""" return otio.schema.Track(name="=====================", kind=trackType) + def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): - """Make OTIO track from ClipDatas with option to add markers and color to all clips on track""" + """Make OTIO track from ClipDatas with option to add markers + and color to all clips on track""" # make new blank track with name and kind from parameters track = otio.schema.Track(name=trackName, kind=trackKind) @@ -61,9 +77,9 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) delta = tlStart - currentEnd - if(delta > 0): + if (delta > 0): gapDur = otio.opentime.RationalTime(delta, tlRate) - gap = otio.schema.Gap(duration = gapDur) + gap = otio.schema.Gap(duration=gapDur) track.append(gap) currentEnd = tlStart + tlDuration @@ -74,7 +90,7 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) # add clip to track newClip = copy.deepcopy(clipData.source_clip) if clipColor is not None: - #testing + # testing newClip = addRavenColor(newClip, clipColor) newClip.color = clipColor @@ -84,31 +100,38 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False) return track + def makeTrackB(clipGroup, trackNum, trackKind): - """Make an annotated track from timeline B. Shows added and edited clips as well as - clips that have moved between tracks. - + """Make an annotated track from timeline B. Shows added and edited clips + as well as clips that have moved between tracks. + Algorithm makes individual tracks for each clip category the track contains, - then flattens them to form the final track. Since blanks are left in all of the individual tracks, - flattening should allow all clips to simmply slot down into place on the flattened track + then flattens them to form the final track. Since blanks are left in all of + the individual tracks, flattening should allow all clips to simply + slot down into place on the flattened track Ex. 
track 1 has added and unchanged clips Algorithm steps: 1) Make a track containing only the unchanged clips of track 1 - 2) Make another track containing only the added clips of track 1 and color them green - 3) Flatten the added clips track on top of the unchanged clips track to create a track containing both + 2) Make another track containing only the added clips of track 1 and color + them green + 3) Flatten the added clips track on top of the unchanged clips track to + create a track containing both """ # for each category of clips, make an indivdual track and color code accordingly tSame = makeTrack("same", trackKind, clipGroup.same) - tAdd = makeTrack("added", trackKind, clipGroup.add, otio.core.Color.GREEN) - tEdited = makeTrack("edited", trackKind, clipGroup.edit, otio.core.Color.ORANGE, markersOn=True) - tMoved = makeTrack("moved", trackKind, clipGroup.move, otio.core.Color.PURPLE, markersOn=True) + tAdd = makeTrack("added", trackKind, clipGroup.add, addedClipsColor) + tEdited = makeTrack("edited", trackKind, clipGroup.edit, + editedClipsColor, markersOn=True) + tMoved = makeTrack("moved", trackKind, clipGroup.move, + movedClipsColor, markersOn=True) - # put all the tracks into a list and flatten them down to a single track that contains all the color-coded clips + # put all the tracks into a list and flatten them down to a single track + # that contains all the color-coded clips flatB = otio.core.flatten_stack([tSame, tEdited, tAdd, tMoved]) - + # update track name and kind if trackKind == otio.schema.Track.Kind.Video: flatB.name = trackKind + " B" + str(trackNum) @@ -118,19 +141,23 @@ def makeTrackB(clipGroup, trackNum, trackKind): return flatB + def makeTrackA(clipGroup, trackNum, trackKind): """Make an annotated track from timeline A. Shows deleted clips and the original clips corresponding to clips edited in timeline B. Algorithm makes individual tracks for each clip category the track contains, - then flattens them to form the final track. Since blanks are left in all of the individual tracks, - flattening should allow all clips to simmply slot down into place on the flattened track + then flattens them to form the final track. Since blanks are left in all of + the individual tracks, flattening should allow all clips to simmply slot down + into place on the flattened track Ex. 
track 1 has deleted and unchanged clips Algorithm steps: 1) Make a track containing only the unchanged clips of track 1 - 2) Make another track containing only the deleted clips of track 1 and color them red - 3) Flatten the deleted clips track on top of the unchanged clips track to create a track containing both + 2) Make another track containing only the deleted clips of track 1 and color + them red + 3) Flatten the deleted clips track on top of the unchanged clips track + to create a track containing both """ # for each category of clips, make an indivdual track and color code accordingly @@ -139,10 +166,11 @@ def makeTrackA(clipGroup, trackNum, trackKind): prevEdited = [] for e in clipGroup.edit: prevEdited.append(e.matched_clipData) - tEdited = makeTrack("edited", trackKind, prevEdited, otio.core.Color.ORANGE) - tDel = makeTrack("deleted", trackKind, clipGroup.delete, otio.core.Color.PINK) - - # put all the tracks into a list and flatten them down to a single track that contains all the color-coded clips + tEdited = makeTrack("edited", trackKind, prevEdited, editedClipsColor) + tDel = makeTrack("deleted", trackKind, clipGroup.delete, deletedClipsColor) + + # put all the tracks into a list and flatten them down to a single track + # that contains all the color-coded clips flatA = otio.core.flatten_stack([tSame, tEdited, tDel]) # update track name and kind @@ -152,4 +180,4 @@ def makeTrackA(clipGroup, trackNum, trackKind): flatA.name = trackKind + " A" + str(trackNum) flatA.kind = trackKind - return flatA \ No newline at end of file + return flatA diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index d8262ef03..e9da4e3f5 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -27,8 +27,8 @@ from .otiodiff import getDiff + def main(): - """otiotool main program. This function is responsible for executing the steps specified by all of the command line arguments in the right order. @@ -223,7 +223,7 @@ def parse_arguments(): 5.5 Diff The --diff option allows you to compare two OTIO files. It generates an - OTIO file annotated with the differences between their clips as well as a + OTIO file annotated with the differences between their clips as well as a text summary report in the console. Ordering of files given to --input matters as diff compares the second file to the first. --diff can't be used concurrently with --stack or --concat @@ -254,7 +254,8 @@ def parse_arguments(): Inspect specific audio clips in detail: otiotool -i playlist.otio --audio-only --list-tracks --inspect "Interview" -Diff fileB against fileA (ordering matters where fileA is the file fileB compares against): +Diff fileB against fileA +(ordering matters where fileA is the file fileBcompares against): otiotool -i fileA.otio fileB.otio --diff --o display.otio """, formatter_class=argparse.RawDescriptionHelpFormatter @@ -476,7 +477,8 @@ def parse_arguments(): parser.add_argument( "--diff", action="store_true", - help="""Diff and compare two otio files. Input file type must be .otio and input file order matters""" + help="""Diff and compare two otio files. 
Input file type must be .otio + and input file order matters""" ) # ================== @@ -518,16 +520,22 @@ def read_inputs(input_paths): timelines.append(timeline) return timelines + def diff_otio(timelines): # TODO: check file format of timelines for OTIO - """Return an annotated timeline showing how clips changed from the first to the second timeline""" - assert len(timelines) >= 2, "Less than 2 timelines given. 2 timelines are required to perform a diff" + """Return an annotated timeline showing how clips changed from the first to + the second timeline""" + assert len( + timelines) >= 2, "Less than 2 timelines given. 2 timelines are required" + " to perform a diff" if len(timelines) != 2: - print("Warning: more than 2 timelines provided as input. Only the first two timelines will be diffed.") + print("Warning: more than 2 timelines provided as input. Only the first" + " two timelines will be diffed.") else: return getDiff.diffTimelines(timelines[0], timelines[1]) + def keep_only_video_tracks(timeline): """Remove all tracks except for video tracks from a timeline.""" timeline.tracks[:] = timeline.video_tracks() diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py index 676d9f291..6a1aa4e95 100644 --- a/tests/test_otiodiff.py +++ b/tests/test_otiodiff.py @@ -2,10 +2,10 @@ import opentimelineio as otio from opentimelineio.console.otiodiff.clipData import ClipData -import opentimelineio.console.otiodiff.makeOtio as makeOtio +# import opentimelineio.console.otiodiff.makeOtio as makeOtio import opentimelineio.console.otiodiff.getDiff as getDiff -from collections import namedtuple +# from collections import namedtuple class TestClipData(unittest.TestCase): @@ -13,24 +13,24 @@ class TestClipData(unittest.TestCase): def test_same_name(self): clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -45,24 +45,24 @@ def test_same_name(self): def test_different_name(self): clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 
24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName2 testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName2 testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -75,28 +75,27 @@ def test_different_name(self): assert not clipDataB.sameName(clipDataA) - def test_same_duration(self): # check that length of clip is the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -108,28 +107,28 @@ def test_same_duration(self): clipDataB = ClipData(clipB, 1) assert clipDataB.sameDuration(clipDataA) - + def test_different_duration(self): # check that length of clip is the different clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - 
available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(20, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -142,29 +141,28 @@ def test_different_duration(self): assert not clipDataB.sameDuration(clipDataA) - def test_check_same(self): # check that two exact same clips are the same # check that two exact same clips but moved in the timeline are the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -180,30 +178,30 @@ def test_check_same(self): def test_check_same_if_move(self): # check that two exact same clips but moved in the timeline are the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 
24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() gapDur = otio.opentime.RationalTime(5, 24) - gap = otio.schema.Gap(duration = gapDur) + gap = otio.schema.Gap(duration=gapDur) trackA.append(clipA) trackB.extend([gap, clipB]) @@ -213,28 +211,28 @@ def test_check_same_if_move(self): assert clipDataB.checkSame(clipDataA) assert clipDataB.note == "shifted laterally in track" - + def test_check_not_same(self): # check that two clips with different names are not the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName2 testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName2 testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -249,26 +247,27 @@ def test_check_not_same(self): assert clipDataB.note is None def test_check_not_same2(self): - # check that two clips with different source range start durations are not the same + # check that two clips with different source range + # start durations are not the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(20, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 
24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -278,31 +277,31 @@ def test_check_not_same2(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - + assert not clipDataB.checkSame(clipDataA) assert clipDataB.note is None def test_check_not_same3(self): # check that two clips with different source range start times are not the same clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -312,31 +311,31 @@ def test_check_not_same3(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - + assert not clipDataB.checkSame(clipDataA) assert clipDataB.note is None - + def test_check_edited_trimmed_head(self): # check for trim head/tail and lengthen head/tail clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(90, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(90, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -347,7 +346,6 @@ def test_check_edited_trimmed_head(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - assert clipDataB.checkEdited(clipDataA) 
print("note is:", clipDataB.note) assert clipDataB.note == "trimmed head by 10 frames" @@ -355,24 +353,24 @@ def test_check_edited_trimmed_head(self): def test_check_edited_trimmed_tail(self): # check for trim head/tail and lengthen head/tail clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(90, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(90, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -383,31 +381,30 @@ def test_check_edited_trimmed_tail(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - assert clipDataB.checkEdited(clipDataA) assert clipDataB.note == "trimmed tail by 10 frames" def test_check_edited_lengthened_head(self): # check for trim head/tail and lengthen head/tail clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(20, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -417,7 +414,7 @@ def test_check_edited_lengthened_head(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - + assert clipDataB.checkEdited(clipDataA) print("note:", clipDataB.note) assert clipDataB.note == "lengthened head by 10 frames" @@ 
-425,24 +422,24 @@ def test_check_edited_lengthened_head(self): def test_check_edited_lengthened_tail(self): # check for trim head/tail and lengthen head/tail clipA = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24)), ) clipB = otio.schema.Clip( - name = "testName testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(100, 24))), - source_range = otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(20, 24)), + name="testName testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(100, 24))), + source_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(20, 24)), ) trackA = otio.schema.Track() trackB = otio.schema.Track() @@ -452,46 +449,47 @@ def test_check_edited_lengthened_tail(self): clipDataA = ClipData(clipA, 1) clipDataB = ClipData(clipB, 1) - + assert clipDataB.checkEdited(clipDataA) assert clipDataB.note == "lengthened tail by 10 frames" + class TestGetDif(unittest.TestCase): def test_find_clones(self): clipA = otio.schema.Clip( - name = "clipA testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), + name="clipA testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), ) clipB = otio.schema.Clip( - name = "clipB testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24))), + name="clipB testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), ) clipC = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(20, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), ) clipCClone = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(30, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), ) clipD = otio.schema.Clip( - name = "clipD testTake", - media_reference = 
otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(40, 24), - otio.opentime.RationalTime(10, 24))), + name="clipD testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), ) trackA = otio.schema.Track() @@ -502,53 +500,54 @@ def test_find_clones(self): clipDataC = ClipData(clipC, 1) clipDataCClone = ClipData(clipCClone, 1) clipDataD = ClipData(clipD, 1) - + testClips = [clipDataA, clipDataB, clipDataC, clipDataCClone, clipDataD] clones, nonClones = getDiff.findClones(testClips) correctClones = {clipDataC.name: [clipDataC, clipDataCClone]} correctNonClones = [clipDataA, clipDataB, clipDataD] - assert(clones == correctClones), "Not all cloned clips correctly identified" - assert(nonClones == correctNonClones), "Not all unique clips correctly identified" - + assert (clones == correctClones + ), "Not all cloned clips correctly identified" + assert (nonClones == correctNonClones + ), "Not all unique clips correctly identified" def test_sort_clones_clones_in_both(self): # SETUP clipA = otio.schema.Clip( - name = "clipA testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), + name="clipA testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), ) clipB = otio.schema.Clip( - name = "clipB testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24))), + name="clipB testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), ) clipC = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(20, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), ) clipCClone = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(30, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), ) clipD = otio.schema.Clip( - name = "clipD testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(40, 24), - otio.opentime.RationalTime(10, 24))), + name="clipD testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), ) trackA = otio.schema.Track() @@ -566,51 +565,55 @@ def test_sort_clones_clones_in_both(self): # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) - # VERIFY + # VERIFY clonesA, nonClonesA = sortedClonesA clonesB, nonClonesB = sortedClonesB - - assert(len(clonesA) == 1), "Number of clones found in trackA 
doesn't match" - assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" - assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" - assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" + + assert (len(clonesA) == 1 + ), "Number of clones found in trackA doesn't match" + assert (len(nonClonesA) == 2 + ), "Number of non-clones found in trackA doesn't match" + assert (len(clonesB) == 1 + ), "Number of clones found in trackB doesn't match" + assert (len(nonClonesB) == 2 + ), "Number of non-clones found in trackB doesn't match" def test_sort_clones_clones_in_one(self): # SETUP clipA = otio.schema.Clip( - name = "clipA testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), + name="clipA testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), ) clipB = otio.schema.Clip( - name = "clipB testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24))), + name="clipB testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), ) clipC = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(20, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), ) clipCClone = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(30, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), ) clipD = otio.schema.Clip( - name = "clipD testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(40, 24), - otio.opentime.RationalTime(10, 24))), + name="clipD testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), ) trackA = otio.schema.Track() @@ -628,51 +631,55 @@ def test_sort_clones_clones_in_one(self): # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) - # VERIFY + # VERIFY clonesA, nonClonesA = sortedClonesA clonesB, nonClonesB = sortedClonesB - - assert(len(clonesA) == 1), "Number of clones found in trackA doesn't match" - assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" - assert(len(clonesB) == 0), "Number of clones found in trackB doesn't match" - assert(len(nonClonesB) == 3), "Number of non-clones found in trackB doesn't match" - + + assert (len(clonesA) == 1 + ), "Number of clones found in trackA doesn't match" + assert (len(nonClonesA) == 2 + ), "Number of non-clones found in trackA doesn't match" + assert (len(clonesB) == 0 + ), "Number of clones found in 
trackB doesn't match" + assert (len(nonClonesB) == 3 + ), "Number of non-clones found in trackB doesn't match" + def test_sort_clones_clones_in_one_single_in_other(self): # SETUP clipA = otio.schema.Clip( - name = "clipA testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(0, 24), - otio.opentime.RationalTime(10, 24))), + name="clipA testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(0, 24), + otio.opentime.RationalTime(10, 24))), ) clipB = otio.schema.Clip( - name = "clipB testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(10, 24), - otio.opentime.RationalTime(10, 24))), + name="clipB testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(10, 24), + otio.opentime.RationalTime(10, 24))), ) clipC = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(20, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(20, 24), + otio.opentime.RationalTime(10, 24))), ) clipCClone = otio.schema.Clip( - name = "clipC testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(30, 24), - otio.opentime.RationalTime(10, 24))), + name="clipC testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(30, 24), + otio.opentime.RationalTime(10, 24))), ) clipD = otio.schema.Clip( - name = "clipD testTake", - media_reference = otio.core.MediaReference( - available_range=otio.opentime.TimeRange( - otio.opentime.RationalTime(40, 24), - otio.opentime.RationalTime(10, 24))), + name="clipD testTake", + media_reference=otio.core.MediaReference( + available_range=otio.opentime.TimeRange( + otio.opentime.RationalTime(40, 24), + otio.opentime.RationalTime(10, 24))), ) trackA = otio.schema.Track() @@ -690,18 +697,23 @@ def test_sort_clones_clones_in_one_single_in_other(self): # EXERCISE sortedClonesA, sortedClonesB = getDiff.sortClones(clipDatasA, clipDatasB) - # VERIFY + # VERIFY clonesA, nonClonesA = sortedClonesA clonesB, nonClonesB = sortedClonesB - - assert(len(clonesA) == 1), "Number of clones found in trackA doesn't match" - assert(len(nonClonesA) == 2), "Number of non-clones found in trackA doesn't match" - assert(len(clonesB) == 1), "Number of clones found in trackB doesn't match" - assert(len(nonClonesB) == 2), "Number of non-clones found in trackB doesn't match" + + assert (len(clonesA) == 1 + ), "Number of clones found in trackA doesn't match" + assert (len(nonClonesA) == 2 + ), "Number of non-clones found in trackA doesn't match" + assert (len(clonesB) == 1 + ), "Number of clones found in trackB doesn't match" + assert (len(nonClonesB) == 2 + ), "Number of non-clones found in trackB doesn't match" # TODO: test case for timelines with unmatched track nums # test case for timeline with matched track nums + class TestMakeOtio(unittest.TestCase): # TODO: test sort clips @@ -710,4 +722,4 @@ class TestMakeOtio(unittest.TestCase): if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() From 
15d88f850411c630b600037d557b07f708632705 Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Tue, 19 Aug 2025 15:25:41 -0700 Subject: [PATCH 25/30] fixed trackA to use unchanged clips from timeline A instead of timeline B for correct timing Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 3 +- .../console/otiodiff/getDiff.py | 28 ++++++++++--------- .../console/otiodiff/makeOtio.py | 22 +++++++++++---- 3 files changed, 33 insertions(+), 20 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 857bc3207..6a2cc8bcb 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -78,7 +78,8 @@ def checkSame(self, cA): # check in same place on timeline if (self.timeline_range == cA.timeline_range): isSame = True - # check duration is same but not necessarily in same place on timeline + # check duration is same but not necessarily in same place + # on timeline # TODO: change to else? (does the elif always run?) elif (self.sameDuration(cA)): # Note: check in relation to left and right? diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index b0445c069..9c84f8255 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -92,8 +92,8 @@ def findClones(clips): Returns: clones (dictionary): dictionary of all clones in the group of ClipDatas - keys: name of clone - values: list of ClipDatas of that name + keys: name of clone + values: list of ClipDatas of that name nonClones (list): list of unique clones in group of ClipDatas """ @@ -208,6 +208,8 @@ def compareClips(clipDatasA, clipDatasB): if cB.name not in namesA: added.append(cB) else: + if namesA[cB.name] is None: + print("has none pair") cB.matched_clipData = namesA[cB.name] isSame = cB.checkSame(cB.matched_clipData) if (isSame): @@ -407,17 +409,17 @@ def categorizeClipsByTracks(tracksA, tracksB): Returns: clipTable (dictionary): dictionary holding categorized ClipDatas, organized by the track number of the ClipDatas - dictionary keys: track number (int) - dictionary values: dictionary holding categorized - ClipDatas of that track - nested dictionary keys: category name (string) - nested dictionary values: list of ClipDatas that fall - into the category - - ex: clipTable when tracksA and tracksB contain 3 tracks - {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} - 2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} - 3 : {"add": [], "edit": [], "same": [], "delete": []}, "move": []} + dictionary keys: track number (int) + dictionary values: dictionary holding categorized + ClipDatas of that track + nested dictionary keys: category name (string) + nested dictionary values: list of ClipDatas that fall + into the category + + Ex: clipTable when tracksA and tracksB contain 3 tracks + {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} + 2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []} + 3 : {"add": [], "edit": [], "same": [], "delete": []}, "move": []} """ clipTable = {} diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index bfab84a32..d4b9cfe83 100644 --- 
a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
@@ -141,6 +141,18 @@ def makeTrackB(clipGroup, trackNum, trackKind):
     return flatB
 
 
+def getMatchedClips(clipGroup):
+    """Return the timeline A counterpart recorded on each ClipData,
+    falling back to the ClipData itself when no match was recorded."""
+    pairedClips = []
+    for clipData in clipGroup:
+        # print(clipData.name, clipData.matched_clipData)
+        if clipData.matched_clipData is None:
+            pairedClips.append(clipData)
+        else:
+            pairedClips.append(clipData.matched_clipData)
+    return pairedClips
+
 
 def makeTrackA(clipGroup, trackNum, trackKind):
     """Make an annotated track from timeline A. Shows deleted clips and the original clips
@@ -161,12 +170,13 @@ def makeTrackA(clipGroup, trackNum, trackKind):
     """
 
     # for each category of clips, make an indivdual track and color code accordingly
-    tSame = makeTrack("same", trackKind, clipGroup.same)
-    # grab the original pair from all the edit clipDatas
-    prevEdited = []
-    for e in clipGroup.edit:
-        prevEdited.append(e.matched_clipData)
-    tEdited = makeTrack("edited", trackKind, prevEdited, editedClipsColor)
+    # grab the original pair from all the edit and same clipDatas, since those
+    # categories only store the clips from timeline B
+    originalEdited = getMatchedClips(clipGroup.edit)
+    originalUnchanged = getMatchedClips(clipGroup.same)
+
+    tSame = makeTrack("same", trackKind, originalUnchanged)
+    tEdited = makeTrack("edited", trackKind, originalEdited, editedClipsColor)
     tDel = makeTrack("deleted", trackKind, clipGroup.delete, deletedClipsColor)
 
     # put all the tracks into a list and flatten them down to a single track

From afa7ba4cc5b765b665a1e8a8830db3328d18a102 Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Wed, 20 Aug 2025 10:18:59 -0700
Subject: [PATCH 26/30] padded the track set that has fewer tracks with empty tracks for matched processing

Signed-off-by: Yingjie Wang
---
 .../console/otiodiff/getDiff.py               | 115 +++++++++++-------
 .../console/otiodiff/makeOtio.py              |   3 +
 .../opentimelineio/console/otiotool.py        |   9 +-
 3 files changed, 75 insertions(+), 52 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
index 9c84f8255..56d53e5ad 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -238,7 +238,7 @@ def compareClips(clipDatasA, clipDatasB):
 
 
 def compareTracks(trackA, trackB, trackNum):
-    """Compare clipis in two OTIO tracks and categorize into
+    """Compare clips in two OTIO tracks and categorize into
    added, edited, same, and deleted"""
     clipDatasA = []
     clipDatasB = []
@@ -297,9 +297,11 @@ def checkMoved(allDel, allAdd):
     for clip in moved:
         clip.note = "Moved from track: " + str(clip.matched_clipData.track_num)
         # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num)
-    # TODO: check if empty string or not
     for i in moveEdit:
-        i.note += " and moved from track " + str(i.matched_clipData.track_num)
+        if i.note == "":
+            i.note = "Moved from track: " + str(i.matched_clipData.track_num)
+        else:
+            i.note += " and moved from track " + str(i.matched_clipData.track_num)
         # print(i.name, i.note)
 
     return newAdd, moveEdit, moved, newDel
@@ -393,8 +395,6 @@ def makeNewOtio(clipTable, trackType):
     return newTl
 
 
-# TODO: rename to create bucket/cat/db/stuff; categorizeClipsByTracks + comment
-
 def categorizeClipsByTracks(tracksA, 
tracksB): """ clipTable = {} - # TODO? ^change to class perhaps? low priority - - shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB - # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) - - # TODO: compute min of 2, then go through leftover and assign accordingly - # maybe compare unmatched against empty track? pad shorter one with empty - # Process Matched Tracks - # index through all the tracks of the timeline with less tracks - for i in range(0, len(shorterTlTracks)): + matchedTrackNum = min(len(tracksA), len(tracksB)) + totalTrackNum = max(len(tracksA), len(tracksB)) + print(matchedTrackNum) + print(totalTrackNum) + + trackNumDiff = totalTrackNum - matchedTrackNum + shorterTracks = tracksA if len(tracksA) < len(tracksB) else tracksB + + for i in range(0, trackNumDiff): + # pad shorter tracks with empty tracks + shorterTracks.append(makeOtio.makeEmptyTrack(shorterTracks[0].kind)) + + print(len(tracksA)) + + for i in range(0, totalTrackNum): currTrackA = tracksA[i] currTrackB = tracksB[i] trackNum = i + 1 - # clipGroup = compareTracks(currTrackA, currTrackB, trackNum) add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) - # print(add) clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} - # print("here", clipTable[trackNum]["add"][0].name) - - # Process Unmatched Tracks - if shorterTlTracks == tracksA: - # timelineA is shorter so timelineB has added tracks - for i in range(len(shorterTlTracks), len(tracksB)): - newTrack = tracksB[i] - trackNum = i + 1 - # newTrack.name = trackType + " B" + str(trackNum) - - added = [] - for c in newTrack.find_clips(): - cd = ClipData(c, trackNum) - added.append(cd) - - clipTable[trackNum] = {"add": added, "edit": [], "same": [], "delete": []} - - else: - for i in range(len(shorterTlTracks), len(tracksA)): - newTrack = tracksA[i] - trackNum = i + 1 - # newTrack.name = trackType + " A" + str(trackNum) - - deleted = [] - for c in newTrack.find_clips(): - cd = ClipData(c, trackNum) - deleted.append(cd) - - clipTable[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} # recat added/deleted into moved clipTable = sortMoved(clipTable) @@ -575,3 +549,52 @@ def makeTimelineSummary(timelineA, timelineB): /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio ''' + + +# ========= + + + # shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB + # # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB)) + + # # Process Matched Tracks + # # index through all the tracks of the timeline with less tracks + # for i in range(0, len(shorterTlTracks)): + # currTrackA = tracksA[i] + # currTrackB = tracksB[i] + # trackNum = i + 1 + + # # clipGroup = compareTracks(currTrackA, currTrackB, trackNum) + # add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum) + # # print(add) + + # clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete} + # # print("here", clipTable[trackNum]["add"][0].name) + + # # Process Unmatched Tracks + # if shorterTlTracks == tracksA: + # # timelineA is shorter so timelineB has added tracks + # for i in range(len(shorterTlTracks), len(tracksB)): + # newTrack = tracksB[i] + # trackNum = i + 1 + # # newTrack.name = trackType + " B" + str(trackNum) + + # added = [] + # for c in newTrack.find_clips(): + # cd = ClipData(c, trackNum) + # added.append(cd) + + # clipTable[trackNum] = {"add": added, 
"edit": [], "same": [], "delete": []} + + # else: + # for i in range(len(shorterTlTracks), len(tracksA)): + # newTrack = tracksA[i] + # trackNum = i + 1 + # # newTrack.name = trackType + " A" + str(trackNum) + + # deleted = [] + # for c in newTrack.find_clips(): + # cd = ClipData(c, trackNum) + # deleted.append(cd) + + # clipTable[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted} \ No newline at end of file diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py index d4b9cfe83..d300d498d 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py @@ -56,6 +56,9 @@ def makeSeparaterTrack(trackType): from the timeline B tracks""" return otio.schema.Track(name="=====================", kind=trackType) +def makeEmptyTrack(trackType): + """Make empty track""" + return otio.schema.Track(kind=trackType) def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False): """Make OTIO track from ClipDatas with option to add markers diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py index e9da4e3f5..246da8b45 100755 --- a/src/py-opentimelineio/opentimelineio/console/otiotool.py +++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py @@ -119,11 +119,9 @@ def main(): for timeline in timelines: copy_media_to_folder(timeline, args.copy_media_to_folder) - # TODO: Update numbering - # ===== NEW Phase 5.5: Diff otio files ====== + # Phase 5.5 Diff two timelines if args.diff: - # TODO? stack, concat, diff make mutually exclusive timelines = [diff_otio(timelines)] # Phase 6: Remove/Redaction @@ -522,11 +520,10 @@ def read_inputs(input_paths): def diff_otio(timelines): - # TODO: check file format of timelines for OTIO """Return an annotated timeline showing how clips changed from the first to the second timeline""" - assert len( - timelines) >= 2, "Less than 2 timelines given. 2 timelines are required" + assert (len(timelines) >= 2 + ), "Less than 2 timelines given. 2 timelines are required" " to perform a diff" if len(timelines) != 2: From 22e86922b1498db0ab9a6ff271163e8d0493d9df Mon Sep 17 00:00:00 2001 From: Yingjie Wang Date: Wed, 20 Aug 2025 10:49:35 -0700 Subject: [PATCH 27/30] added ability to specify comparisons by name or by full_name for clipDatas Signed-off-by: Yingjie Wang --- .../console/otiodiff/clipData.py | 1 - .../console/otiodiff/getDiff.py | 57 ++++++++++++------- .../console/otiodiff/makeOtio.py | 2 - 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py index 6a2cc8bcb..bb62bed6f 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py @@ -70,7 +70,6 @@ def checkSame(self, cA): # check names are same if self.sameName(cA): # check source range is same - # TODO: call trimmed range instead of source range ??? 
From 22e86922b1498db0ab9a6ff271163e8d0493d9df Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Wed, 20 Aug 2025 10:49:35 -0700
Subject: [PATCH 27/30] added ability to specify comparisons by name or by
 full_name for clipDatas

Signed-off-by: Yingjie Wang
---
 .../console/otiodiff/clipData.py |  1 -
 .../console/otiodiff/getDiff.py  | 57 ++++++++++++-------
 .../console/otiodiff/makeOtio.py |  2 -
 3 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
index 6a2cc8bcb..bb62bed6f 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
@@ -70,7 +70,6 @@ def checkSame(self, cA):
         # check names are same
         if self.sameName(cA):
             # check source range is same
-            # TODO: call trimmed range instead of source range ???
             # TODO: make test where has null source range -> see things break,
             # then go back and change <- low priority
             if (self.source_range == cA.source_range):
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
index 56d53e5ad..c73dbad9e 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -187,7 +187,7 @@ def compareClones(clonesA, clonesB):
     return added, unchanged, deleted

-def compareClips(clipDatasA, clipDatasB):
+def compareClips(clipDatasA, clipDatasB, nameType=""):
     """Compare two groups of unique ClipDatas and categorize into added,
     edited, unchanged, and deleted"""
     namesA = {}
@@ -198,19 +198,32 @@ def compareClips(clipDatasA, clipDatasB):
     unchanged = []
     deleted = []

-    for c in clipDatasA:
-        namesA[c.name] = c
-    for c in clipDatasB:
-        namesB[c.name] = c
+    # use full_name if nameType is specified,
+    # otherwise default to using name
+    if nameType.lower() == "full":
+        for c in clipDatasA:
+            namesA[c.full_name] = c
+        for c in clipDatasB:
+            namesB[c.full_name] = c
+    else:
+        for c in clipDatasA:
+            namesA[c.name] = c
+        for c in clipDatasB:
+            namesB[c.name] = c

     for cB in clipDatasB:
+        # check which name to use
+        clipDataBName = cB.name
+        if nameType.lower() == "full":
+            clipDataBName = cB.full_name

-        if cB.name not in namesA:
+        # do comparisons
+        if clipDataBName not in namesA:
             added.append(cB)
         else:
-            if namesA[cB.name] is None:
+            if namesA[clipDataBName] is None:
                 print("has none pair")
-            cB.matched_clipData = namesA[cB.name]
+            cB.matched_clipData = namesA[clipDataBName]
             isSame = cB.checkSame(cB.matched_clipData)
             if (isSame):
                 # cB.pair = namesA[cB.name]
@@ -222,15 +235,18 @@ def compareClips(clipDatasA, clipDatasB):
                     edited.append(cB)
                 else:
                     print("======== not categorized ==========")
-                    cA = namesA[cB.name]
-                    print("Clips: ", cA.name, cB.name)
+                    cA = namesA[clipDataBName]
+                    print("Clips: ", cA.name, clipDataBName)
                     # cA.printData()
                     # cB.printData()
                     # print("===================")
                     # print type of object

     for cA in clipDatasA:
-        if cA.name not in namesB:
+        clipDataAName = cA.name
+        if nameType.lower() == "full":
+            clipDataAName = cA.full_name
+        if clipDataAName not in namesB:
             deleted.append(cA)

     # TODO: some can be sets instead of lists
@@ -283,17 +299,14 @@ def checkMoved(allDel, allAdd):
     # wanted to compare full names to account for dif dep/take
     # otherwise shotA (layout123) and shotA (anim123) would count as a move and
     # not as add
-    # TODO: maybe preserve full name and also clip name, ex. id and name
-    # TODO: fix compareClips so that it allows check by full name
-    for c in allDel:
-        c.name = c.full_name
-    for c in allAdd:
-        c.name = c.full_name
-
-    newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd)
+    newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd, nameType="full")

+    # removes clips that are moved in same track, just keep clips moved between tracks
     moved = [clip for clip in moved
             if clip.track_num != clip.matched_clipData.track_num]
+
+    # print(len(moved), len(moveEdit))
+
     for clip in moved:
         clip.note = "Moved from track: " + str(clip.matched_clipData.track_num)
         # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num)
@@ -426,8 +439,8 @@ def categorizeClipsByTracks(tracksA, tracksB):

     matchedTrackNum = min(len(tracksA), len(tracksB))
     totalTrackNum = max(len(tracksA), len(tracksB))
-    print(matchedTrackNum)
-    print(totalTrackNum)
+    # print(matchedTrackNum)
+    # print(totalTrackNum)

     trackNumDiff = totalTrackNum - matchedTrackNum
     shorterTracks = tracksA if len(tracksA) < len(tracksB) else tracksB
@@ -436,7 +449,7 @@ def categorizeClipsByTracks(tracksA, tracksB):
         # pad shorter tracks with empty tracks
         shorterTracks.append(makeOtio.makeEmptyTrack(shorterTracks[0].kind))

-    print(len(tracksA))
+    # print(len(tracksA))

     for i in range(0, totalTrackNum):
         currTrackA = tracksA[i]
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
index d300d498d..f6a0c385b 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
@@ -48,8 +48,6 @@ def addMarker(newClip, clipData, color=None):

     return newClip

-# TODO: make variables for add, edit, delete, move colors?
-
 def makeSeparaterTrack(trackType):
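The nameType switch added above decides which identity is used when pairing
clips across the two timelines. A small self-contained illustration of the
difference (FakeClipData is a stand-in for this example only; the real
class lives in clipData.py):

    class FakeClipData:
        def __init__(self, full_name):
            self.full_name = full_name           # e.g. "shotA layout123"
            self.name = full_name.split(" ")[0]  # e.g. "shotA"

    def key_for(clip_data, name_type=""):
        # full_name keeps take/department suffixes, so different takes of
        # the same shot stop matching each other
        if name_type.lower() == "full":
            return clip_data.full_name
        return clip_data.name

    a = FakeClipData("shotA layout123")
    b = FakeClipData("shotA anim123")
    assert key_for(a) == key_for(b)                  # collide by short name
    assert key_for(a, "full") != key_for(b, "full")  # distinct by full name

This is why checkMoved can now pass nameType="full" instead of
destructively overwriting each ClipData's name with its full_name.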
From 704a4f952f33e82c471be4038a7366320f10282e Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Wed, 20 Aug 2025 10:57:54 -0700
Subject: [PATCH 28/30] Added docstrings for clipData

Signed-off-by: Yingjie Wang
---
 .../console/otiodiff/clipData.py | 33 ++++++++++++-------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
index bb62bed6f..1185ddf5a 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/clipData.py
@@ -2,11 +2,19 @@
 # TODO: clip comparable??? ClipInfo
 # source clip or clip ref?

-# full name = name + version, name is just name,
-# add ex, split on space, b4 is name, after is version
-
 class ClipData:
+    """ClipData holds information from an OTIO clip that's necessary for
+    comparing differences. It also keeps some information associated with
+    the clip after comparisons are made, such as a matched ClipData and a note
+    about what has changed.
+
+    source_clip = original OTIO clip the ClipData represents
+    full_name = full name of source_clip
+    name and version come from splitting full_name on a space
+    ex: full_name: clipA version1, name: clipA, version: version1
+    """
+
     full_name = ""
     name = ""
     version = None  # currently not used in comparisons
@@ -34,12 +42,14 @@ def __init__(self, source_clip, track_num, note=None):

     # uses structure of "clipA v1" where clipA is the name and v1 is the version
     def splitFullName(self, clip):
+        """Split full name into name and version"""
         shortName = clip.name.split(" ")[0]
         version = clip.name.split(" ")[1] if len(clip.name.split(" ")) > 1 else None

         return shortName, version

     def printData(self):
+        """Prints to console all parameters of ClipData"""
         print("name: ", self.name)
@@ -51,21 +61,23 @@ def printData(self):
         print("note: ", self.note)
         print("source clip: ", self.source.name)

-    # compare truncated names
     def sameName(self, cA):
+        """Compare names and return whether they are the same"""
         if (self.name.lower() == cA.name.lower()):
             return True
         else:
             return False

     # note: local and source duration should always match, can assume same
-    # compare the duration within the timeline for 2 clips
     def sameDuration(self, cA):
+        """Compare duration within the timeline of this ClipData
+        against another ClipData"""
         return self.timeline_range.duration.value == cA.timeline_range.duration.value

-    # compare 2 clips and see if they are the exact same, whether exact or moved along
-    # the timeline and also changes note based on edits
+
     def checkSame(self, cA):
+        """Check if this ClipData is the exact same as another ClipData or if
+        it's the same just moved along the timeline. Updates note based on edits"""
         isSame = False
         # check names are same
         if self.sameName(cA):
@@ -94,9 +106,8 @@ def checkSame(self, cA):

         return isSame

-    # compare 2 clips and see if they have been
-    # compare self: "new", to old
     def checkEdited(self, cA):
+        """Compare 2 ClipDatas and see if they have been edited"""
         isEdited = False

         # Note: assumption that source range and timeline range duration always equal
@@ -128,8 +139,8 @@ def checkEdited(self, cA):
             if (selfDur.value == cADur.value):
                 self.note = "start time in source range changed"

-# put note assignment into function, return note?
-# self, other, olderClipData rather than cA
+            # TODO: put note assignment into function, return note?
+            # self, other, olderClipData rather than cA
             # clip duration shorter
             elif (selfDur.value < cADur.value):
                 self.note = "trimmed " + deltaFramesStr + " frames"
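As a quick illustration of the naming convention these docstrings describe,
here is what the split looks like on a plain OTIO clip (the values in the
comments follow the docstring's own example):

    import opentimelineio as otio

    clip = otio.schema.Clip(name="clipA version1")
    parts = clip.name.split(" ")
    name = parts[0]                                 # "clipA"
    version = parts[1] if len(parts) > 1 else None  # "version1"

A clip named without a space, like "clipA", yields version None, which is
why version is documented as optional.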
From cae61a4351ba9a60b9c8a0e966919e04d44459a6 Mon Sep 17 00:00:00 2001
From: Yingjie Wang
Date: Wed, 20 Aug 2025 17:02:21 -0700
Subject: [PATCH 29/30] added todo's

Signed-off-by: Yingjie Wang
---
 .../console/otiodiff/getDiff.py |  3 +++
 tests/test_otiodiff.py          | 19 ++++++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
index c73dbad9e..45fa1b6e9 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -6,6 +6,7 @@ from .clipData import ClipData
 from . import makeOtio

+# TODO: change some todos to be suggestions for future work instead of todos

 def diffTimelines(timelineA, timelineB):
     '''Diff two OTIO timelines and identify how clips on video and/or audio tracks
@@ -289,6 +290,7 @@ def compareTracks(trackA, trackB, trackNum):
     return added, edited, unchanged, deleted

 # TODO? account for move edit, currently only identifies strictly moved
+# TODO: update all "same" to "unchanged"

 def checkMoved(allDel, allAdd):
@@ -547,6 +549,7 @@ def makeTimelineSummary(timelineA, timelineB):
         print("Timeline duration did not change")
     print("")

+# TODO:remove notes before push
 ''' ======= Notes =======
     Test shot simple:
diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py
index 6a1aa4e95..a17f43a13 100644
--- a/tests/test_otiodiff.py
+++ b/tests/test_otiodiff.py
@@ -455,6 +455,22 @@ def test_check_edited_lengthened_tail(self):

 class TestGetDif(unittest.TestCase):
+    # TODO: test case for timelines with unmatched track nums
+    # test case for timeline with matched track nums
+
+    def test_single_track(self):
+        pass
+
+    def test_multi_track_matched(self):
+        pass
+
+    def test_multi_track_unmatched_more_A(self):
+        pass
+
+    def test_multi_track_unmatched_more_B(self):
+        pass
+
+
     def test_find_clones(self):
         clipA = otio.schema.Clip(
             name="clipA testTake",
@@ -710,9 +726,6 @@ def test_sort_clones_clones_in_one_single_in_other(self):
         assert (len(nonClonesB) == 2
                 ), "Number of non-clones found in trackB doesn't match"

-    # TODO: test case for timelines with unmatched track nums
-    # test case for timeline with matched track nums
-

 class TestMakeOtio(unittest.TestCase):
     # TODO: test sort clips
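The stubs above outline the missing coverage for matched and unmatched
track counts. A hypothetical way one of them could be filled in, shown
only as a sketch (it builds empty timelines with the public OTIO API and
leans on the clipTable shape documented in categorizeClipsByTracks; it has
not been run against the patched module):

    import opentimelineio as otio
    import opentimelineio.console.otiodiff.getDiff as getDiff

    def make_timeline(track_count):
        tl = otio.schema.Timeline()
        for _ in range(track_count):
            tl.tracks.append(
                otio.schema.Track(kind=otio.schema.Track.Kind.Video))
        return tl

    def test_multi_track_unmatched_more_B(self):
        tlA = make_timeline(1)
        tlB = make_timeline(2)
        clipTable = getDiff.categorizeClipsByTracks(
            tlA.video_tracks(), tlB.video_tracks())
        # track 2 exists only in timeline B, so it is compared against a
        # padded empty track and can only contribute "add" entries
        assert sorted(clipTable.keys()) == [1, 2]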
"clipA v1" where clipA is the name and v1 is the version - def splitFullName(self, clip): - """Split full name into name and version""" - shortName = clip.name.split(" ")[0] - version = clip.name.split(" ")[1] if len(clip.name.split(" ")) > 1 else None + """Split full name into name and version by space. Returns None for + version if full name contains no spaces.""" + parts = clip.name.split(" ") + shortName = parts[0] + version = parts[1] if len(parts) > 1 else None return shortName, version @@ -82,10 +70,7 @@ def checkSame(self, cA): # check names are same if self.sameName(cA): # check source range is same - # TODO: make test where has null source range -> see things break, - # then go back and change <- low priority if (self.source_range == cA.source_range): - # print(self.name, " ", self.timeline_range, " ", cA.timeline_range) # check in same place on timeline if (self.timeline_range == cA.timeline_range): isSame = True @@ -93,17 +78,10 @@ def checkSame(self, cA): # on timeline # TODO: change to else? (does the elif always run?) elif (self.sameDuration(cA)): - # Note: check in relation to left and right? - # know if moved in seq rather than everything shifted over - # because of lengthen/shorten of other clips + # Note: currently only checks for lateral shifts, doesn't + # check for reordering of clips isSame = True self.note = "shifted laterally in track" - else: - # print("source range different", cA.name, self.name) - # print(self.media_ref) - # print(self.media_ref.target_url) - pass - return isSame def checkEdited(self, cA): @@ -111,6 +89,7 @@ def checkEdited(self, cA): isEdited = False # Note: assumption that source range and timeline range duration always equal + # TODO: sometimes asserts get triggered, more investigation needed # assert(self.source_range.duration.value == self.timeline_range.duration.value # ), "clip source range and timeline range durations don't match" # assert(cA.source_range.duration.value == cA.timeline_range.duration.value @@ -122,15 +101,6 @@ def checkEdited(self, cA): selfSourceStart = self.source_range.start_time cASourceStart = cA.source_range.start_time - # clip duration same but referencing different areas on the same timeline - # if selfDur.value == cADur.value: - # if (self.source_range.start_time != cA.source_range.start_time): - # # print("source range dif between: ", self.name, "and", cA.name) - # # self.printData() - # # cA.printData() - # self.note = "source range start times differ" - # isEdited = True - if (self.source_range != cA.source_range): self.note = "source range changed" isEdited = True @@ -139,8 +109,6 @@ def checkEdited(self, cA): if (selfDur.value == cADur.value): self.note = "start time in source range changed" - # TODO: put note assignment into function, return note? - # self, other, olderClipData rather than cA # clip duration shorter elif (selfDur.value < cADur.value): self.note = "trimmed " + deltaFramesStr + " frames" diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py index 45fa1b6e9..9790730f8 100644 --- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py +++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py @@ -6,8 +6,6 @@ from .clipData import ClipData from . 
diff --git a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
index 45fa1b6e9..9790730f8 100644
--- a/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/getDiff.py
@@ -6,8 +6,6 @@ from .clipData import ClipData
 from . import makeOtio

-# TODO: change some todos to be suggestions for future work instead of todos
-

 def diffTimelines(timelineA, timelineB):
     '''Diff two OTIO timelines and identify how clips on video and/or audio tracks
     changed from timeline A to timeline B.
@@ -31,13 +29,9 @@ def diffTimelines(timelineA, timelineB):
     # check input timelines for video and audio tracks
     if len(timelineA.video_tracks()) > 0 or len(timelineB.video_tracks()) > 0:
         hasVideo = True
-    # else:
-    #     print("no video tracks")

     if len(timelineA.audio_tracks()) > 0 or len(timelineB.audio_tracks()) > 0:
         hasAudio = True
-    # else:
-    #     print("no audio tracks")

     makeTimelineSummary(timelineA, timelineB)
@@ -49,7 +43,7 @@ def diffTimelines(timelineA, timelineB):
         audioClipTable = categorizeClipsByTracks(
             timelineA.audio_tracks(), timelineB.audio_tracks())

-        makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "perTrack")
+        makeSummary(videoClipTable, otio.schema.Track.Kind.Video, "summary")
         makeSummary(audioClipTable, otio.schema.Track.Kind.Audio, "summary")

         videoTl = makeNewOtio(videoClipTable, otio.schema.Track.Kind.Video)
@@ -73,16 +67,8 @@ def diffTimelines(timelineA, timelineB):
     else:
         print("No video or audio tracks found in both timelines.")

-    # Debug
-    # origClipCount = len(timelineA.find_clips()) + len(timelineB.find_clips())
-
-    # print(origClipCount)
-    # print(len(outputTimeline.find_clips()))
-
     return outputTimeline

-# TODO: make nonClones a set rather than a list

 def findClones(clips):
     """Separate the cloned ClipDatas (ones that share the same name) from the
@@ -124,22 +110,18 @@ def sortClones(clipDatasA, clipDatasB):
     clonesA, nonClonesA = findClones(clipDatasA)
     clonesB, nonClonesB = findClones(clipDatasB)

-    # move clips that are clones in the other files into the clones folder
+    # move clips that are clones in the other files into the clones table
     # leaves strictly unique clips in nonClones
-    # if a clip is a clone in the other timeline, put into clones dictionary
+    # if a clip is a clone in the other timeline, put into clones table
     for c in nonClonesA:
         if c.name in clonesB.keys():
             clonesA[c.name] = [c]
             nonClonesA.remove(c)
-            # print("clone in B file: ", c.name)
     for c in nonClonesB:
         if c.name in clonesA.keys():
             clonesB[c.name] = [c]
             nonClonesB.remove(c)
-            # print("clone in A file: ", c.name)

-    # clipCountA = 0
-    # clipCountB = 0
     return (clonesA, nonClonesA), (clonesB, nonClonesB)
@@ -153,15 +135,14 @@ def compareClones(clonesA, clonesB):
     for nameB in clonesB:
         # if there are no clips in timeline A with the same name
        # as cloneB, all of the clones of cloneB are new and added
-        # print("name b: ", nameB)
         if nameB not in clonesA:
             added.extend(clonesB[nameB])

         # name matched, there exists clones in both A and B, check if there are
         # same clips
-        # technically can be the first one is "edited" and the rest are
+        # Note: Potential categorization: first clone is "edited" and the rest are
         # "added"/"deleted" -> depends on how want to define
-        # currently, all clones that aren't the exact same get categorized as \
+        # Note: currently, all clones that aren't the exact same get categorized as
         # either "added" or "deleted"
         else:
             clipsA = clonesA[nameB]
@@ -183,8 +164,6 @@ def compareClones(clonesA, clonesB):
         if nameA not in clonesB:
             deleted.extend(clonesA[nameA])

-    # print("from clones added: ", len(added), " deleted: ", len(deleted))
-
     return added, unchanged, deleted
@@ -224,24 +203,19 @@ def compareClips(clipDatasA, clipDatasB, nameType=""):
         else:
             if namesA[clipDataBName] is None:
                 print("has none pair")
+
             cB.matched_clipData = namesA[clipDataBName]
             isSame = cB.checkSame(cB.matched_clipData)
             if (isSame):
-                # cB.pair = namesA[cB.name]
                 unchanged.append(cB)
             else:
                 isEdited = cB.checkEdited(cB.matched_clipData)
                 if (isEdited):
-                    # cB.matched_clipData = namesA[cB.name]
                     edited.append(cB)
                 else:
                     print("======== not categorized ==========")
                     cA = namesA[clipDataBName]
                     print("Clips: ", cA.name, clipDataBName)
-                    # cA.printData()
-                    # cB.printData()
-                    # print("===================")
-                    # print type of object

     for cA in clipDatasA:
         clipDataAName = cA.name
@@ -250,7 +224,6 @@ def compareClips(clipDatasA, clipDatasB, nameType=""):
         if clipDataAName not in namesB:
             deleted.append(cA)

-    # TODO: some can be sets instead of lists
     return added, edited, unchanged, deleted
@@ -289,35 +262,32 @@ def compareTracks(trackA, trackB, trackNum):
     return added, edited, unchanged, deleted

-# TODO? account for move edit, currently only identifies strictly moved
+
 # TODO: update all "same" to "unchanged"

 def checkMoved(allDel, allAdd):
-    """Identify ClipDatas that have moved between different tracks"""
+    """Identify ClipDatas that have moved between different tracks.
+    """
     # ones found as same = moved
     # ones found as edited = moved and edited

-    # wanted to compare full names to account for dif dep/take
-    # otherwise shotA (layout123) and shotA (anim123) would count as a move and
-    # not as add
+    # want to compare full names to account for dif departments/takes
+    # ex. shotA (layout123) and shotA (anim123) should count as an add
+    # rather than a move
     newAdd, moveEdit, moved, newDel = compareClips(allDel, allAdd, nameType="full")

     # removes clips that are moved in same track, just keep clips moved between tracks
     moved = [clip for clip in moved
             if clip.track_num != clip.matched_clipData.track_num]

-    # print(len(moved), len(moveEdit))
-
     for clip in moved:
         clip.note = "Moved from track: " + str(clip.matched_clipData.track_num)
-        # print(i.name, i.track_num, i.note, i.pair.name, i.pair.track_num)

     for i in moveEdit:
         if i.note == "":
             i.note = "Moved from track: " + str(clip.matched_clipData.track_num)
         else:
             i.note += " and moved from track " + str(i.matched_clipData.track_num)
-        # print(i.name, i.note)

     return newAdd, moveEdit, moved, newDel
@@ -332,7 +302,6 @@ def sortMoved(clipTable):
     for track in clipTable.keys():
         clipGroup = clipTable[track]
-        # print(clipTable[track]["add"])
         if "add" in clipGroup.keys():
             allAdd.extend(clipGroup["add"])
         if "delete" in clipGroup.keys():
@@ -344,12 +313,14 @@ def sortMoved(clipTable):
         clipGroup["move"] = []

+    # currently only adds moved clips to table, ignores moved and edited clips
     add, moveEdit, moved, delete = checkMoved(allDel, allAdd)

     # currently moved clips are still marked as delete in timelineA
     for cd in moved:
         clipTable[cd.track_num]["add"].remove(cd)
         clipTable[cd.track_num]["move"].append(cd)
+        # moved clips should be marked as moved in timelineA rather than deleted
         # clipTable[cd.track_num]["delete"].remove(cd)
         # clipTable[cd.pair.track_num]["moved"].append(cd.pair)
@@ -406,8 +377,6 @@ def makeNewOtio(clipTable, trackType):

     newTl.tracks.extend(tracksInA)

-    # makeOtio.colorMovedA(newTl, clipTable)
-
     return newTl
@@ -434,15 +403,13 @@ def categorizeClipsByTracks(tracksA, tracksB):
     Ex: clipTable when tracksA and tracksB contain 3 tracks
     {1 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}
     2 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}
-    3 : {"add": [], "edit": [], "same": [], "delete": []}, "move": []}
+    3 : {"add": [], "edit": [], "same": [], "delete": [], "move": []}}
     """

     clipTable = {}

     matchedTrackNum = min(len(tracksA), len(tracksB))
     totalTrackNum = max(len(tracksA), len(tracksB))
-    # print(matchedTrackNum)
-    # print(totalTrackNum)

     trackNumDiff = totalTrackNum - matchedTrackNum
     shorterTracks = tracksA if len(tracksA) < len(tracksB) else tracksB
@@ -450,8 +417,6 @@ def categorizeClipsByTracks(tracksA, tracksB):
     for i in range(0, trackNumDiff):
         # pad shorter tracks with empty tracks
         shorterTracks.append(makeOtio.makeEmptyTrack(shorterTracks[0].kind))
-
-    # print(len(tracksA))

     for i in range(0, totalTrackNum):
         currTrackA = tracksA[i]
@@ -462,10 +427,9 @@ def categorizeClipsByTracks(tracksA, tracksB):
         clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete}

-    # recat added/deleted into moved
+    # recategorize added/deleted into moved
     clipTable = sortMoved(clipTable)

-    # tracksInA, tracksInB = makeNewOtio(clipTable, trackType)
     return clipTable
@@ -547,70 +511,4 @@ def makeTimelineSummary(timelineA, timelineB):
         print(f"Timeline duration decreased by {delta:.2f} seconds")
     else:
         print("Timeline duration did not change")
-    print("")
-
-# TODO:remove notes before push
-
-''' ======= Notes =======
-    Test shot simple:
-    /Users/yingjiew/Documents/testDifFiles/h150_104a.105j_2025.04.04_ANIM-flat.otio
-    /Users/yingjiew/Documents/testDifFiles/150_104a.105jD_2025.06.27-flat.otio
-
-    Test seq matching edit's skywalker:
-    /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.09_Skywalker_v3.0_
-    ChangeNotes.Relinked.01.otio
-    /Users/yingjiew/Folio/casa/Dream_EP101_2024.02.23_Skywalker_v4.0_ChangeNotes.otio
-
-    Test shot multitrack:
-    /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2022.07.28_BT3.otio
-    /Users/yingjiew/Folio/edit-dept/More_OTIO/i110_BeliefSystem_2023.06.09.otio
-'''
-
-
-# =========
-
-
-    # shorterTlTracks = tracksA if len(tracksA) < len(tracksB) else tracksB
-    # # print("len tracksA: ", len(tracksA), "len tracksB:", len(tracksB))
-
-    # # Process Matched Tracks
-    # # index through all the tracks of the timeline with less tracks
-    # for i in range(0, len(shorterTlTracks)):
-    #     currTrackA = tracksA[i]
-    #     currTrackB = tracksB[i]
-    #     trackNum = i + 1
-
-    #     # clipGroup = compareTracks(currTrackA, currTrackB, trackNum)
-    #     add, edit, same, delete = compareTracks(currTrackA, currTrackB, trackNum)
-    #     # print(add)
-
-    #     clipTable[trackNum] = {"add": add, "edit": edit, "same": same, "delete": delete}
-    #     # print("here", clipTable[trackNum]["add"][0].name)
-
-    # # Process Unmatched Tracks
-    # if shorterTlTracks == tracksA:
-    #     # timelineA is shorter so timelineB has added tracks
-    #     for i in range(len(shorterTlTracks), len(tracksB)):
-    #         newTrack = tracksB[i]
-    #         trackNum = i + 1
-    #         # newTrack.name = trackType + " B" + str(trackNum)
-
-    #         added = []
-    #         for c in newTrack.find_clips():
-    #             cd = ClipData(c, trackNum)
-    #             added.append(cd)
-
-    #         clipTable[trackNum] = {"add": added, "edit": [], "same": [], "delete": []}
-
-    # else:
-    #     for i in range(len(shorterTlTracks), len(tracksA)):
-    #         newTrack = tracksA[i]
-    #         trackNum = i + 1
-    #         # newTrack.name = trackType + " A" + str(trackNum)
-
-    #         deleted = []
-    #         for c in newTrack.find_clips():
-    #             cd = ClipData(c, trackNum)
-    #             deleted.append(cd)
-
-    #         clipTable[trackNum] = {"add": [], "edit": [], "same": [], "delete": deleted}
\ No newline at end of file
+    print("")
\ No newline at end of file
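For readers consuming the result, the clipTable documented above is just a
dict of per-track buckets. A tiny standalone example of walking it
(populated here with empty lists purely for demonstration):

    clipTable = {
        1: {"add": [], "edit": [], "same": [], "delete": [], "move": []},
        2: {"add": [], "edit": [], "same": [], "delete": [], "move": []},
    }

    for track_num, buckets in sorted(clipTable.items()):
        counts = {category: len(clips) for category, clips in buckets.items()}
        print("track", track_num, counts)

Each bucket holds ClipData objects, so a real consumer can also read
clip.note and clip.matched_clipData for the move/edit details recorded
during categorization.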
a/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiodiff/makeOtio.py
@@ -84,14 +84,12 @@ def makeTrack(trackName, trackKind, trackClips, clipColor=None, markersOn=False)
             track.append(gap)

             currentEnd = tlStart + tlDuration
-            # print("new end: ", currentEnd)
         else:
             currentEnd += tlDuration

         # add clip to track
         newClip = copy.deepcopy(clipData.source_clip)
         if clipColor is not None:
-            # testing
             newClip = addRavenColor(newClip, clipColor)
             newClip.color = clipColor
@@ -145,7 +143,6 @@ def makeTrackB(clipGroup, trackNum, trackKind):
 def getMatchedClips(clipGroup):
     pairedClips = []
     for clipData in clipGroup:
-        # print(clipData.name, clipData.matched_clipData)
         if clipData.matched_clipData is None:
             pairedClips.append(clipData)
         else:
diff --git a/src/py-opentimelineio/opentimelineio/console/otiotool.py b/src/py-opentimelineio/opentimelineio/console/otiotool.py
index 246da8b45..78f7de8aa 100755
--- a/src/py-opentimelineio/opentimelineio/console/otiotool.py
+++ b/src/py-opentimelineio/opentimelineio/console/otiotool.py
@@ -23,8 +23,6 @@

 import opentimelineio as otio

-# sys.path.append("src/py-opentimelineio/opentimelineio/console/otiodiff")
-
 from .otiodiff import getDiff


@@ -471,7 +469,6 @@ def parse_arguments():
         are supported. Use '-' to write OTIO to standard output."""
     )

-    # NEW ==============
     parser.add_argument(
         "--diff",
         action="store_true",
         help="""Compare two timelines, only 2 input files are accepted
         and input file order matters"""
     )

-    # ==================
-
     args = parser.parse_args()

     # At least one of these must be specified
diff --git a/tests/test_otiodiff.py b/tests/test_otiodiff.py
index a17f43a13..35151e4f9 100644
--- a/tests/test_otiodiff.py
+++ b/tests/test_otiodiff.py
@@ -5,12 +5,8 @@
 # import opentimelineio.console.otiodiff.makeOtio as makeOtio
 import opentimelineio.console.otiodiff.getDiff as getDiff

-# from collections import namedtuple
-
-
 class TestClipData(unittest.TestCase):
     # check if the names of two ClipDatas are the same
-
     def test_same_name(self):
         clipA = otio.schema.Clip(
             name="testName testTake",
@@ -453,6 +449,8 @@ def test_check_edited_lengthened_tail(self):
         assert clipDataB.checkEdited(clipDataA)
         assert clipDataB.note == "lengthened tail by 10 frames"

+    # TODO: make test where clip has null source range
+

 class TestGetDif(unittest.TestCase):
     # TODO: test case for timelines with unmatched track nums
     # test case for timeline with matched track nums
@@ -730,7 +728,7 @@ def test_sort_clones_clones_in_one_single_in_other(self):

 class TestMakeOtio(unittest.TestCase):
     # TODO: test sort clips
-    # test make track
+    # TODO: test make track
     pass
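With the series applied, the diff can be driven either through otiotool or
directly from Python. A usage sketch (the file names are placeholders; the
--diff flag and diffTimelines come from the patches above):

    # command line: annotate how new_cut.otio differs from old_cut.otio
    #   otiotool -i old_cut.otio new_cut.otio --diff -o diff_annotated.otio

    import opentimelineio as otio
    from opentimelineio.console.otiodiff import getDiff

    tlA = otio.adapters.read_from_file("old_cut.otio")
    tlB = otio.adapters.read_from_file("new_cut.otio")
    annotated = getDiff.diffTimelines(tlA, tlB)
    otio.adapters.write_to_file(annotated, "diff_annotated.otio")

Order matters: timeline A is treated as the older cut, and the annotated
output marks clips as added, edited, unchanged, deleted, or moved relative
to it.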