
Commit a8351e2

Author: zyan3

properly handle the cases where video decoding fails in compute_clips of class VideoClips

Parent: 2f6ede3

File tree

1 file changed: +9 additions, −3 deletions

torchvision/datasets/video_utils.py

Lines changed: 9 additions & 3 deletions
@@ -168,9 +168,15 @@ def compute_clips(self, num_frames, step, frame_rate=None):
         self.clips = []
         self.resampling_idxs = []
         for video_pts, info in zip(self.video_pts, self.info):
-            clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, info["video_fps"], frame_rate)
-            self.clips.append(clips)
-            self.resampling_idxs.append(idxs)
+            if "video_fps" in info:
+                clips, idxs = self.compute_clips_for_video(video_pts, num_frames, step, info["video_fps"], frame_rate)
+                self.clips.append(clips)
+                self.resampling_idxs.append(idxs)
+            else:
+                # properly handle the cases where video decoding fails
+                self.clips.append(torch.zeros(0, num_frames, dtype=torch.int64))
+                self.resampling_idxs.append(torch.zeros(0, dtype=torch.int64))
         clip_lengths = torch.as_tensor([len(v) for v in self.clips])
         self.cumulative_sizes = clip_lengths.cumsum(0).tolist()
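For context on why the empty placeholders keep downstream indexing consistent: a video whose info dict has no "video_fps" entry (decoding failed) now contributes a (0, num_frames) clip tensor, so its clip count is zero and it adds nothing to cumulative_sizes. Below is a minimal sketch, not part of the commit and using illustrative clip counts, that mirrors the last two lines of the hunk:

import torch

num_frames = 16

# Per-video clip tensors; the second video failed to decode, so it gets
# the empty placeholder introduced by this commit.
clips = [
    torch.zeros(5, num_frames, dtype=torch.int64),  # video 0: 5 clips
    torch.zeros(0, num_frames, dtype=torch.int64),  # video 1: decoding failed
    torch.zeros(3, num_frames, dtype=torch.int64),  # video 2: 3 clips
]

clip_lengths = torch.as_tensor([len(v) for v in clips])
cumulative_sizes = clip_lengths.cumsum(0).tolist()
print(cumulative_sizes)  # [5, 5, 8] -- the failed video contributes no clips

Because the failed video's length is 0, the cumulative sizes that map a global clip index back to a video skip it cleanly instead of raising a KeyError on the missing "video_fps" field.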
