diff --git a/depthai_sdk/examples/replay/looped-replay.py b/depthai_sdk/examples/replay/looped-replay.py
new file mode 100644
index 000000000..04c2b2bc5
--- /dev/null
+++ b/depthai_sdk/examples/replay/looped-replay.py
@@ -0,0 +1,9 @@
+from depthai_sdk import OakCamera
+
+with OakCamera(replay='https://www.youtube.com/watch?v=Y1jTEyb3wiI') as oak:
+    oak.replay.set_loop(True)  # <--- Enable looping of the video, so it will never end
+
+    color = oak.create_camera('color')
+    nn = oak.create_nn('vehicle-detection-0202', color)
+    oak.visualize(nn, fps=True)
+    oak.start(blocking=True)
diff --git a/depthai_sdk/src/depthai_sdk/oak_outputs/xout/xout_frames.py b/depthai_sdk/src/depthai_sdk/oak_outputs/xout/xout_frames.py
index 0c1a0503f..9ab96360c 100644
--- a/depthai_sdk/src/depthai_sdk/oak_outputs/xout/xout_frames.py
+++ b/depthai_sdk/src/depthai_sdk/oak_outputs/xout/xout_frames.py
@@ -17,10 +17,16 @@ class XoutFrames(XoutBase):
     """
-    Single message, no syncing required
+    Stream of frames. Single message, no syncing required.
     """
 
     def __init__(self, frames: StreamXout, fps: float = 30, frame_shape: Tuple[int, ...] = None):
+        """
+        Args:
+            frames: StreamXout object.
+            fps: Frames per second for the output stream.
+            frame_shape: Shape of the frame. If not provided, it will be inferred from the first frame.
+        """
         self.frames = frames
         self.name = frames.name
 
@@ -34,22 +40,16 @@ def __init__(self, frames: StreamXout, fps: float = 30, frame_shape: Tuple[int,
     def setup_visualize(self,
                         visualizer: Visualizer,
                         visualizer_enabled: bool,
-                        name: str = None):
+                        name: str = None
+                        ) -> None:
         self._visualizer = visualizer
         self._visualizer_enabled = visualizer_enabled
         self.name = name or self.name
 
-    def setup_recorder(self,
-                       recorder: VideoRecorder,
-                       encoding: str = 'mp4v'):
+    def setup_recorder(self, recorder: VideoRecorder) -> None:
         self._video_recorder = recorder
-        # Enable encoding for the video recorder
-        self._video_recorder[self.name].set_fourcc(encoding)
 
     def visualize(self, packet: FramePacket) -> None:
-        """
-        Called from main thread if visualizer is not None.
-        """
         # Frame shape may be 1D, that means it's an encoded frame
         if self._visualizer.frame_shape is None or np.array(self._visualizer.frame_shape).ndim == 1:
             if self._frame_shape is not None:
@@ -75,13 +75,10 @@ def visualize(self, packet: FramePacket) -> None:
 
     def on_record(self, packet) -> None:
         if self._video_recorder:
-            # TODO not ideal to check it this way
            if isinstance(self._video_recorder[self.name], AvWriter):
                self._video_recorder.write(self.name, packet.msg)
            else:
                self._video_recorder.write(self.name, packet.frame)
-            # else:
-            #     self._video_recorder.add_to_buffer(self.name, packet.frame)
 
     def xstreams(self) -> List[StreamXout]:
         return [self.frames]
@@ -100,6 +97,6 @@ def new_msg(self, name: str, msg) -> None:
 
         self.queue.put(packet, block=False)
 
-    def close(self):
+    def close(self) -> None:
         if self._video_recorder:
             self._video_recorder.close()
diff --git a/depthai_sdk/src/depthai_sdk/readers/videocap_reader.py b/depthai_sdk/src/depthai_sdk/readers/videocap_reader.py
index e22e3b9cf..2180ebc6e 100644
--- a/depthai_sdk/src/depthai_sdk/readers/videocap_reader.py
+++ b/depthai_sdk/src/depthai_sdk/readers/videocap_reader.py
@@ -19,10 +19,11 @@ class VideoCapReader(AbstractReader):
     Reads stream from mp4, mjpeg, h264, h265
     """
 
-    def __init__(self, path: Path) -> None:
+    def __init__(self, path: Path, loop: bool = False) -> None:
         self.initialFrames: Dict[str, Any] = dict()
         self.shapes: Dict[str, Tuple[int, int]] = dict()
         self.readers: Dict[str, cv2.VideoCapture] = dict()
+        self._is_looped = loop
 
         if path.is_file():
             stream = path.stem if (path.stem in ['left', 'right']) else 'color'
@@ -30,7 +31,8 @@ def __init__(self, path: Path) -> None:
         else:
             for fileName in os.listdir(str(path)):
                 f_name, ext = os.path.splitext(fileName)
-                if ext not in _videoExt: continue
+                if ext not in _videoExt:
+                    continue
 
                 # Check if name of the file starts with left.. right.., or CameraBoardSocket
                 if f_name.startswith('CameraBoardSocket'):
@@ -68,15 +70,23 @@ def read(self):
                 frames[name] = self.initialFrames[name].copy()
                 self.initialFrames[name] = None
 
-            if not self.readers[name].isOpened(): return False
+            if not self.readers[name].isOpened():
+                return False
 
             ok, frame = self.readers[name].read()
-            if not ok: return False
+            if not ok and self._is_looped:
+                self.readers[name].set(cv2.CAP_PROP_POS_FRAMES, 0)
+                ok, frame = self.readers[name].read()
+            elif not ok:
+                return False
 
             frames[name] = frame
         return frames
 
+    def set_loop(self, loop: bool):
+        self._is_looped = loop
+
     def getStreams(self) -> List[str]:
         return [name for name in self.readers]
diff --git a/depthai_sdk/src/depthai_sdk/recorders/video_writers/video_writer.py b/depthai_sdk/src/depthai_sdk/recorders/video_writers/video_writer.py
index fd7bdedd4..684704f3d 100644
--- a/depthai_sdk/src/depthai_sdk/recorders/video_writers/video_writer.py
+++ b/depthai_sdk/src/depthai_sdk/recorders/video_writers/video_writer.py
@@ -1,7 +1,6 @@
-import logging
 from collections import deque
 from pathlib import Path
-from typing import Union, Dict
+from typing import Union
 
 try:
     import cv2
@@ -16,7 +15,21 @@ class VideoWriter(BaseWriter):
-    def __init__(self, path: Path, name: str, fourcc: str, fps: float):  # TODO: fourcc is not used
+    """
+    Writes raw streams to mp4 using cv2.VideoWriter.
+    """
+    _fps: float
+    _path: str
+
+    def __init__(self, path: Path, name: str, fourcc: str, fps: float):
+        """
+        Args:
+            path: Path to save the output. Either a folder or a file.
+            name: Name of the stream.
+            fourcc: FourCC code of the codec used to compress the frames.
+            fps: Frames per second.
+        """
+
         super().__init__(path, name)
 
         self._fourcc = None
@@ -48,7 +61,11 @@ def create_file_for_buffer(self, subfolder: str, buf_name: str):
         self.create_file(subfolder, frame)
 
     def create_file(self, subfolder: str, frame: Union[dai.ImgFrame, np.ndarray]):
-        path_to_file = create_writer_dir(self.path / subfolder, self.name, 'avi')
+        path_to_file = create_writer_dir(self.path / subfolder, self.name, 'mp4')
+
+        if not path_to_file.endswith('.mp4'):
+            path_to_file = path_to_file[:-4] + '.mp4'
+
         self._create_file(path_to_file, frame)
 
     def _create_file(self, path_to_file: str, frame: Union[dai.ImgFrame, np.ndarray]):
@@ -57,32 +74,28 @@ def _create_file(self, path_to_file: str, frame: Union[dai.ImgFrame, np.ndarray]
         else:
             self._h, self._w = frame.getHeight(), frame.getWidth()
 
-        # Disparity - RAW8
-        # Depth - RAW16
-        if self._fourcc is None:
-            if isinstance(frame, np.ndarray):
-                c = 1 if frame.ndim == 2 else frame.shape[2]
-                self._fourcc = "GRAY" if c == 1 else "I420"
-            else:
-                if frame.getType() == dai.ImgFrame.Type.RAW16:  # Depth
-                    self._fourcc = "FFV1"
-                elif frame.getType() == dai.ImgFrame.Type.RAW8:  # Mono Cams
-                    self._fourcc = "GREY"
-                else:
-                    self._fourcc = "I420"
+        if not isinstance(frame, np.ndarray):
+            frame = frame.getCvFrame()
+        c = 1 if frame.ndim == 2 else frame.shape[2]
+
+        self._fourcc = 'mp4v'
 
         self._file = cv2.VideoWriter(path_to_file,
                                      cv2.VideoWriter_fourcc(*self._fourcc),
                                      self._fps, (self._w, self._h),
-                                     isColor=self._fourcc != "GREY")
+                                     isColor=c != 1)
 
     def write(self, frame: Union[dai.ImgFrame, np.ndarray]):
         if self._file is None:
             self.create_file(subfolder='', frame=frame)
+
         self._file.write(frame if isinstance(frame, np.ndarray) else frame.getCvFrame())
 
-    def close(self):
-        if self._file is not None:
+    def close(self) -> None:
+        """
+        Close the file if it is open.
+        """
+        if self._file:
             self._file.release()
             self._file = None
diff --git a/depthai_sdk/src/depthai_sdk/replay.py b/depthai_sdk/src/depthai_sdk/replay.py
index 9ed5cdd80..2d346d971 100644
--- a/depthai_sdk/src/depthai_sdk/replay.py
+++ b/depthai_sdk/src/depthai_sdk/replay.py
@@ -58,7 +58,7 @@ def __init__(self, path: str):
         streams.
 
         Args:
-            path (str): Path to the recording folder
+            path (str): Path to the recording folder.
         """
 
         self.path = self._get_path(path)
@@ -183,6 +183,19 @@ def set_fps(self, fps: float):
         else:
             self.fps = fps
 
+    def set_loop(self, flag: bool):
+        """
+        Sets whether to loop the replay.
+
+        Args:
+            flag (bool): Whether to loop the replay.
+        """
+        from .readers.videocap_reader import VideoCapReader
+        if isinstance(self.reader, VideoCapReader):
+            self.reader.set_loop(flag)
+        else:
+            raise RuntimeError('Looping is only supported for video files.')
+
     def get_fps(self) -> float:
         return self.fps
@@ -284,9 +297,13 @@ def start(self, cb):
     def run(self, cb):
         delay = 1.0 / self.fps
         while True:
-            if not self.sendFrames(cb): break
+            if not self.sendFrames(cb):
+                break
+
             time.sleep(delay)
-            if self._stop: break
+            if self._stop:
+                break
+
         logging.info('Replay `run` thread stopped')
         self._stop = True
@@ -309,7 +326,8 @@ def sendFrames(self, cb=None) -> bool:
                 cb(stream_name, stream.imgFrame)
 
             # Don't send these frames to the OAK camera
-            if stream.disabled: continue
+            if stream.disabled:
+                continue
 
             # Send an imgFrame to the OAK camera
             stream.queue.send(stream.imgFrame)
diff --git a/depthai_sdk/src/depthai_sdk/trigger_action/actions/record_action.py b/depthai_sdk/src/depthai_sdk/trigger_action/actions/record_action.py
index 590e54173..7458b1c86 100644
--- a/depthai_sdk/src/depthai_sdk/trigger_action/actions/record_action.py
+++ b/depthai_sdk/src/depthai_sdk/trigger_action/actions/record_action.py
@@ -140,9 +140,9 @@ def setup(self, device: dai.Device, xouts: List['XoutFrames']):
         self.stream_names = [xout.frames.name for xout in xouts]  # e.g., [color_video, color_bitstream]
         logging.debug(f'RecordAction: stream_names = {self.stream_names}')
         self.recorder.update(self.path, device, xouts)
-        self.run_thread()
+        self._run_thread()
 
-    def run_thread(self):
+    def _run_thread(self):
         buffers = {'before_t_1': self.duration_bt, 'after_t_1': self.duration_at}
         self.recorder.init_buffers(buffers)  # does for every stream
         self.buffers_status = {'writing': {'before_t': 1, 'after_t': []},
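For reference, below is a minimal standalone sketch (not part of the patch) of the two OpenCV techniques this change relies on: rewinding a cv2.VideoCapture via CAP_PROP_POS_FRAMES when a looped reader hits the end of the file, as VideoCapReader.read() now does, and writing frames to an .mp4 container with the 'mp4v' FourCC, matching the new VideoWriter defaults. The file names 'recording.mp4' and 'out.mp4', the 30.0 fps value, and the 100-frame limit are placeholders, not names used by the SDK.

import cv2

cap = cv2.VideoCapture('recording.mp4')  # placeholder input file
writer = None
frames_written = 0

while cap.isOpened() and frames_written < 100:  # arbitrary stop condition
    ok, frame = cap.read()
    if not ok:
        # End of file: rewind to the first frame and read again (the looping behaviour).
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        ok, frame = cap.read()
        if not ok:
            break  # empty or unreadable file

    if writer is None:
        h, w = frame.shape[:2]
        # 'mp4v' FourCC with an .mp4 extension; isColor follows the channel count.
        writer = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'),
                                 30.0, (w, h), isColor=frame.ndim == 3)

    writer.write(frame)
    frames_written += 1

cap.release()
if writer is not None:
    writer.release()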