
FCP adapter uses clip item rate instead of source rate (#177)

* addresses #174
- uses the clip item rate instead of the source (file) rate
- edited a clipitem in premiere_example.xml so that its rate differs from its linked file
- fixed bug in timeline_widget: transitions don't have a trimmed_range_in_parent method
- fixed bug in timeline_widget: leftover reference to stack after the rename to composition

* addresses change requests in #177
- maintains the clip item rate on both read and write (see the sketch below)
bashesenaxis authored and jminor committed Dec 20, 2017
1 parent 1b1e893 commit 9cf5e93c3840ff8542b7558316fad525feb5c1d1
Showing with 67 additions and 45 deletions.
  1. +61 −39 opentimelineio/adapters/fcp_xml.py
  2. +3 −3 opentimelineview/timeline_widget.py
  3. +3 −3 tests/sample_data/premiere_example.xml
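
As a rough illustration of the read-side change (a minimal sketch, not the adapter's actual code: it ignores element_map rate inheritance and NTSC handling, and the clip item below is hypothetical), the source_range of a parsed clip is now built from the rate declared on the <clipitem> itself rather than the rate of its linked <file>:

    import xml.etree.ElementTree as ET
    import opentimelineio as otio

    # Hypothetical, simplified clip item: the <clipitem> runs at 15 fps even
    # though its linked <file> (not shown) declares 30 fps, mirroring the
    # edited premiere_example.xml.
    clip_item = ET.fromstring(
        "<clipitem id='clipitem-1'>"
        "<rate><timebase>15</timebase><ntsc>FALSE</ntsc></rate>"
        "<in>0</in><out>50</out>"
        "</clipitem>"
    )

    # The rate comes from the clip item itself, not from clip_item.find('./file').
    item_rate = int(clip_item.find('./rate/timebase').text)
    in_frame = int(clip_item.find('./in').text)
    out_frame = int(clip_item.find('./out').text)

    source_range = otio.opentime.TimeRange(
        start_time=otio.opentime.RationalTime(in_frame, item_rate),
        duration=otio.opentime.RationalTime(out_frame - in_frame, item_rate),
    )
    print(source_range.duration)                   # 50 frames at 15 fps
    print(source_range.duration.rescaled_to(30))   # same span: 100 frames at 30 fps

The same rescaling is what keeps transition offsets and gap durations consistent when a clip item's rate differs from the sequence rate.
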
opentimelineio/adapters/fcp_xml.py
@@ -171,19 +171,22 @@ def _is_primary_audio_channel(track):
     )


-def _get_transition_cut_point(transition_item):
+def _get_transition_cut_point(transition_item, element_map):
     alignment = transition_item.find('./alignment').text
     start = int(transition_item.find('./start').text)
     end = int(transition_item.find('./end').text)
+    rate = _parse_rate(transition_item, element_map)

     if alignment in ('end', 'end-black'):
-        return end
+        value = end
     elif alignment in ('start', 'start-black'):
-        return start
+        value = start
     elif alignment in ('center',):
-        return int((start + end) / 2)
+        value = int((start + end) / 2)
     else:
-        return int((start + end) / 2)
+        value = int((start + end) / 2)
+
+    return otio.opentime.RationalTime(value, rate)


 # -----------------------
@@ -271,13 +274,13 @@ def _parse_clip_item(clip_item, transition_offsets, element_map):
         clip_item.find('./file'),
         element_map
     )
-    src_rate = _parse_rate(clip_item.find('./file'), element_map)
+    item_rate = _parse_rate(clip_item, element_map)

     # transition offsets are provided in timeline rate. If they deviate they
     # need to be rescaled to clip item rate
     context_transition_offsets = [
-        transition_offsets[0].rescaled_to(src_rate),
-        transition_offsets[1].rescaled_to(src_rate)
+        transition_offsets[0].rescaled_to(item_rate),
+        transition_offsets[1].rescaled_to(item_rate)
     ]

     in_frame = (
@@ -292,9 +295,11 @@ def _parse_clip_item(clip_item, transition_offsets, element_map):
     # source_start in xml is taken relative to the start of the media, whereas
     # we want the absolute start time, taking into account the timecode
+    start_time = otio.opentime.RationalTime(in_frame, item_rate) + timecode
     source_range = otio.opentime.TimeRange(
-        start_time=otio.opentime.RationalTime(in_frame, src_rate) + timecode,
-        duration=otio.opentime.RationalTime(out_frame - in_frame, src_rate)
+        start_time=start_time.rescaled_to(item_rate),
+        duration=otio.opentime.RationalTime(out_frame - in_frame, item_rate)
     )

     # get the clip name from the media reference if not defined on the clip
@@ -310,15 +315,23 @@ def _parse_clip_item(clip_item, transition_offsets, element_map):
         media_reference=media_reference,
         source_range=source_range
     )
-    clip.markers.extend([_parse_marker(m, src_rate) for m in markers])
+    clip.markers.extend([_parse_marker(m, item_rate) for m in markers])

     return clip


-def _parse_transition_item(transition_item, track_rate):
-    start = int(transition_item.find('./start').text)
-    end = int(transition_item.find('./end').text)
-    cut_point = _get_transition_cut_point(transition_item)
+def _parse_transition_item(transition_item, element_map):
+    rate = _parse_rate(transition_item, element_map)
+    start = otio.opentime.RationalTime(
+        int(transition_item.find('./start').text),
+        rate
+    )
+    end = otio.opentime.RationalTime(
+        int(transition_item.find('./end').text),
+        rate
+    )
+    cut_point = _get_transition_cut_point(transition_item, element_map)

     metadata = {
         META_NAMESPACE: {
             'effectid': transition_item.find('./effect/effectid').text,
@@ -328,8 +341,8 @@ def _parse_transition_item(transition_item, track_rate):
     transition = otio.schema.Transition(
         name=transition_item.find('./effect/name').text,
         transition_type=otio.schema.TransitionTypes.SMPTE_Dissolve,
-        in_offset=otio.opentime.RationalTime(cut_point - start, track_rate),
-        out_offset=otio.opentime.RationalTime(end - cut_point, track_rate),
+        in_offset=cut_point - start,
+        out_offset=end - cut_point,
         metadata=metadata
     )

     return transition
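
For illustration, once start, end, and the cut point are all RationalTime values in the transition item's own rate (as above), the offsets fall out of plain subtraction; the numbers here are made up:

    import opentimelineio as otio

    # Illustrative values only; in the adapter these come from the transition
    # item's <start>, <end> and alignment, all expressed in its own rate.
    rate = 30
    start = otio.opentime.RationalTime(100, rate)
    end = otio.opentime.RationalTime(120, rate)
    cut_point = otio.opentime.RationalTime(110, rate)  # e.g. 'center' alignment

    in_offset = cut_point - start   # 10 frames at 30 fps
    out_offset = end - cut_point    # 10 frames at 30 fps
    print(in_offset, out_offset)
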
@@ -366,7 +379,7 @@ def _parse_item(track_item, track_rate, transition_offsets, element_map):
     # depending on the content of the clip-item, we return either a clip, a
     # stack or a transition.
     if track_item.tag == 'transitionitem':
-        return _parse_transition_item(track_item, track_rate)
+        return _parse_transition_item(track_item, element_map)

     file_e = track_item.find('./file')
     if file_e is not None:
@@ -385,7 +398,7 @@ def _parse_item(track_item, track_rate, transition_offsets, element_map):
     raise TypeError(
         'Type of clip item is not supported {item_id}'.format(
-            item_id=track_item.attrib['id']
+            item_id=track_item.attrib['id']
         )
     )
@@ -399,42 +412,48 @@ def _parse_top_level_track(track_e, kind, rate, element_map):
     if not track_items:
         return track

-    last_clip_end = 0
+    last_clip_end = otio.opentime.RationalTime(rate=rate)

     for track_item in track_items:
         clip_item_index = list(track_e).index(track_item)
-        start = int(track_item.find('./start').text)
-        end = int(track_item.find('./end').text)
+        start = otio.opentime.RationalTime(
+            int(track_item.find('./start').text),
+            rate
+        )
+        end = otio.opentime.RationalTime(
+            int(track_item.find('./end').text),
+            rate
+        )

         # start time and end time on the timeline can be set to -1. This means
         # that there is a transition at that end of the clip-item. So the time
         # on the timeline has to be taken from that object.
         transition_offsets = [
-            otio.opentime.RationalTime(),
-            otio.opentime.RationalTime()
+            otio.opentime.RationalTime(rate=rate),
+            otio.opentime.RationalTime(rate=rate)
         ]

         if track_item.tag == 'clipitem':
-            if start == -1:
+            if start.value == -1:
                 in_transition = list(track_e)[clip_item_index - 1]
-                start = _get_transition_cut_point(in_transition)
-                transition_offsets[0] = otio.opentime.RationalTime(
-                    start - int(in_transition.find('./start').text),
-                    rate
+                start = _get_transition_cut_point(in_transition, element_map)
+                transition_offsets[0] = start - otio.opentime.RationalTime(
+                    int(in_transition.find('./start').text),
+                    _parse_rate(in_transition, element_map)
                 )

-            if end == -1:
+            if end.value == -1:
                 out_transition = list(track_e)[clip_item_index + 1]
-                end = _get_transition_cut_point(out_transition)
+                end = _get_transition_cut_point(out_transition, element_map)
                 transition_offsets[1] = otio.opentime.RationalTime(
-                    int(out_transition.find('./end').text) - end,
-                    rate
-                )
+                    int(out_transition.find('./end').text),
+                    _parse_rate(out_transition, element_map)
+                ) - end

         # see if we need to add a gap before this clip-item
         gap_time = start - last_clip_end
         last_clip_end = end
-        if gap_time > 0:
+        if gap_time.value > 0:
             gap_range = otio.opentime.TimeRange(
-                duration=otio.opentime.RationalTime(gap_time, rate)
+                duration=gap_time.rescaled_to(rate)
             )
             track.append(otio.schema.Gap(source_range=gap_range))
@@ -534,8 +553,11 @@ def _build_item_timings(
     # source_start is absolute time taking into account the timecode of the
     # media. But xml regards the source in point from the start of the media.
     # So we subtract the media timecode.
-    source_start = item.source_range.start_time - timecode
-    source_end = item.source_range.end_time_exclusive() - timecode
+    item_rate = item.source_range.start_time.rate
+    source_start = (item.source_range.start_time - timecode) \
+        .rescaled_to(item_rate)
+    source_end = (item.source_range.end_time_exclusive() - timecode) \
+        .rescaled_to(item_rate)

     start = '{:.0f}'.format(timeline_range.start_time.value)
     end = '{:.0f}'.format(timeline_range.end_time_exclusive().value)
@@ -711,7 +733,7 @@ def _build_clip_item(clip_item, timeline_range, transition_offsets, br_map):
     clip_item_e.append(_build_file(clip_item.media_reference, br_map))
     if clip_item.media_reference.available_range:
         clip_item_e.append(
-            _build_rate(clip_item.media_reference.available_range.start_time)
+            _build_rate(clip_item.source_range.start_time)
         )

     clip_item_e.extend(_build_marker(m) for m in clip_item.markers)
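
A hedged round-trip sketch of what the write side of this change is meant to preserve (the file name and child index are illustrative, and the first child of a track may be a gap rather than a clip): the rate written back for a clip item follows its source_range, not its media reference's available_range.

    import opentimelineio as otio

    # Hypothetical round trip through the FCP XML adapter; the path and the
    # child index are illustrative, not taken from the test suite.
    timeline = otio.adapters.read_from_file("premiere_example.xml")
    clip = timeline.tracks[0][0]
    print(clip.source_range.start_time.rate)  # the clip item's own rate, e.g. 15

    # Writing back should keep that rate on the <clipitem> rather than
    # falling back to the linked file's rate.
    otio.adapters.write_to_file(timeline, "roundtrip.xml")
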
opentimelineview/timeline_widget.py
@@ -255,8 +255,8 @@ def __init__(self, track, *args, **kwargs):
         self._populate()

     def _populate(self):
-        for item in self.track:
-            timeline_range = item.trimmed_range_in_parent()
+        for n, item in enumerate(self.track):
+            timeline_range = self.track.trimmed_range_of_child_at_index(n)

             rect = QtCore.QRectF(
                 0,
@@ -553,7 +553,7 @@ def add_stack(self, stack):
         tab_index = next(
             (
                 i for i in range(self.count())
-                if stack is self.widget(i).scene().stack
+                if stack is self.widget(i).scene().composition
             ),
             None
         )
tests/sample_data/premiere_example.xml
@@ -50,15 +50,15 @@
 <masterclipid>masterclip-1</masterclipid>
 <name>sc01_sh010_anim.mov</name>
 <enabled>TRUE</enabled>
-<duration>100</duration>
+<duration>50</duration>
 <rate>
-  <timebase>30</timebase>
+  <timebase>15</timebase>
   <ntsc>FALSE</ntsc>
 </rate>
 <start>536</start>
 <end>636</end>
 <in>0</in>
-<out>100</out>
+<out>50</out>
 <pproTicksIn>0</pproTicksIn>
 <pproTicksOut>846720000000</pproTicksOut>
 <alphatype>none</alphatype>
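
For the sample-data edit above: 50 frames at a timebase of 15 span the same time as the previous 100 frames at 30, so the clip item still occupies frames 536-636 of the 30 fps sequence. A quick check with OTIO's own time math:

    import opentimelineio as otio

    # 50 frames at timebase 15 cover the same span as the old 100 frames at 30.
    print(otio.opentime.RationalTime(50, 15).rescaled_to(30))  # 100 frames at 30 fps
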
