Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Saving an opencv stream #953

Closed
albertoZurini opened this issue May 5, 2019 · 3 comments
Closed

Saving an opencv stream #953

albertoZurini opened this issue May 5, 2019 · 3 comments
Labels
images Related to ImageClip, or handling of images in general.

Comments

@albertoZurini
Copy link

So I have an RTSP stream and I'd like to save it using moviepy (for codec issues with opencv). Opencv syntax is super easy:

# Illustrative OpenCV snippet: stream frames straight into a video file.
# (`fourcc`, `FPS`, `frame_width`, `frame_height` and `frame` are assumed
# to be defined by the surrounding capture loop.)
out = cv2.VideoWriter('./recording.mp4', fourcc, FPS, (frame_width,frame_height)) # fourcc = codec

while True:
    out.write(frame)  # push each captured frame to the encoder

out.release()  # finalize the container and close the output file

I'd like to do the same thing with moviepy. On the internet I've seen a lot of examples doing something like this:

# Common moviepy pattern: transform an EXISTING video file frame-by-frame.
# `vid_pipeline` is a user-supplied frame -> frame function.
myclip = VideoFileClip('project_video.mp4')
output_vid = 'output.mp4'
clip = myclip.fl_image(vid_pipeline)  # apply vid_pipeline to every frame
clip.write_videofile(output_vid, audio=False)

But this means that I have to have a video to start with.

So the actual problem is that I want to save an array of images which is dynamically built, and I don't want to save each image to disk and then import all the images into moviepy. Is this possible?

Specifications

  • Python Version: 3.6.6
  • Moviepy Version: 1.0.0
  • Platform Name: Ubuntu
  • Platform Version: 18.04
@albertoZurini
Copy link
Author

I've managed to write a solution:

from moviepy.editor import *
import cv2

class SaveVideo():
  """Accumulate frames in memory and export them as an MP4 via moviepy."""

  def __init__(self, file_name, FPS):
    # Output base name (".mp4" is appended on close) and playback rate.
    self.file_name = file_name
    self.FPS = FPS
    self.frames = []

  def add_frame(self, frame, correct=False):
    """Queue one frame; pass correct=True to convert OpenCV BGR to RGB."""
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if correct else frame
    self.frames.append(converted)

  def close(self):
    """Assemble the queued frames into a clip and write the video file."""
    clip = ImageSequenceClip(list(self.frames), fps=self.FPS)
    clip.write_videofile('%s.mp4' % self.file_name, fps=self.FPS)


if __name__ == '__main__':
  import numpy as np
  # Demo: write ten random 720p frames to "a.mp4" at 1 fps.
  recorder = SaveVideo('a', 1)
  for _ in range(10):
    recorder.add_frame(np.random.randn(720, 1280, 3))
  recorder.close()

@arsserpentarium
Copy link

arsserpentarium commented Oct 18, 2021

a little bit more beefy version:

import time

import cv2
import numpy as np
import pyaudio as pa

from moviepy.editor import *
from moviepy.audio.AudioClip import AudioArrayClip
from moviepy.audio.AudioClip import AudioClip
from moviepy.Clip import Clip
from moviepy.video.VideoClip import VideoClip

def clamp(minimum, x, maximum):
    """Confine x to [minimum, maximum]; the lower bound wins if bounds cross."""
    capped = x if x < maximum else maximum
    return capped if capped > minimum else minimum

def prep_aud(l, ch):
    """Join raw int16 PCM byte chunks into a float array scaled to [-1, 1],
    shaped (samples, ch) with one row per multi-channel sample."""
    raw = b''.join(l)
    samples = np.frombuffer(raw, dtype=np.int16) / 32767
    return samples.reshape((-1, ch))

def soundconv(data):
    """Default sample converter: pass each audio chunk through unchanged."""
    return data

class mic_stream(object):
    """Wrap a pyaudio input device and buffer incoming chunks in a queue."""

    def __init__(self, audio, mic, sc = soundconv):
        """
        audio: an initialized pyaudio.PyAudio instance.
        mic:   input device index to record from.
        sc:    per-chunk converter applied to the raw bytes before queueing.
        """
        self.sc = sc
        self.device = mic
        # BUG FIX: the original called get_device_info_by_index(i), but `i`
        # is undefined in this scope — the device index is `mic`.
        info = audio.get_device_info_by_index(mic)
        d = {
            'rate': int(info['defaultSampleRate']),
            # The device reports its max input channels; clamp to mono/stereo.
            'chan': clamp(1, int(info['maxInputChannels']), 2),
            'form': pa.paInt16,
            'chnk': 512,
            'aud': audio
        }
        self.m_params = d
        self.q = []
        self.ast = audio.open(format=d['form'], channels=d['chan'],
                              rate=d['rate'], input_device_index=mic, input=True,
                              frames_per_buffer=d['chnk'], stream_callback=self.callback)
        # Wait until the callback delivers at least one chunk so we know the
        # stream is live, then discard the warm-up data and pause recording.
        while len(self.q) == 0:
            time.sleep(0.01)
        self.q = []
        self.record(False)
        print('mic is ready')

    def callback(self, in_data, frame_count, time_info, flag):
        # pyaudio stream callback: convert and queue each incoming chunk.
        self.q.append(self.sc(in_data))
        return in_data, pa.paContinue

    def record(self, a):
        """Start (a=True) or stop (a=False) the stream, only on state change."""
        if a ^ self.ast.is_active():
            if a:
                self.ast.start_stream()
            else:
                self.ast.stop_stream()

    def shutdown(self):
        """Stop and close the underlying stream."""
        self.record(False)
        self.ast.close()
        print('mic shutdown')

    def get(self):
        """Drain and return all queued chunks in one swap."""
        output, self.q = self.q, []
        return output

class AudioRecorder(AudioClip):
    """AudioClip fed incrementally with raw int16 PCM byte chunks.

    Chunks are appended via add_chunks(); duration grows as data arrives,
    so the clip can be rendered while audio is still being captured.
    """

    def __init__(self, channels, rate, chunk):
        # channels: interleaved channel count of the incoming PCM data.
        # rate:     capture rate reported by the audio backend.
        # chunk:    per-chunk size used to convert chunk counts to duration.

        Clip.__init__(self)
        self.buffer = []  # raw byte chunks, in arrival order
        # NOTE(review): fps = rate*2/channels — presumably maps int16 byte
        # counts to per-channel sample positions; confirm against the
        # capture format before changing.
        self.fps = rate*2.0/channels
        self.chunk =chunk
        self.duration = 0.0
        self.len = 0  # number of chunks buffered so far
        self.nchannels = channels
        def make_frame(t):
            # Render sample values for time(s) t from the buffered chunks.
            data = self.buffer
            if isinstance(t, np.ndarray):
                # Vector query: indices address the full buffer.
                array_inds = np.round(self.fps * t).astype(int)
            else:
                # Scalar query: pick the single chunk containing t and use a
                # chunk-local index.
                # NOTE(review): this branch looks suspect — `len(t)` below
                # fails for a scalar t, and the chunk-local index is applied
                # after prep_aud of one chunk only; verify scalar behavior.
                data = self.buffer[np.round((self.fps * t)//self.chunk).astype(int)]
                array_inds = np.round(self.fps * t).astype(int) % self.chunk
            array = prep_aud(data, self.nchannels)
            # Zero-fill any requested samples outside the recorded range.
            in_array = (array_inds >= 0) & (array_inds < len(array))
            result = np.zeros((len(t), 2))
            result[in_array] = array[array_inds[in_array]]
            return result
        self.make_frame = make_frame

    def add_chunks(self, l):
        # Append new raw chunks and extend the clip duration to match.
        self.buffer.extend(l)
        self.len = len(self.buffer)
        self.duration = 1.0 * self.len * self.chunk / self.fps

class VideoRecorder(VideoClip):
    """A VideoClip whose frame sequence grows at runtime.

    Frames are offered via frame_set() (or frame_append()) while `recording`
    is True. Frame timing comes from the wall clock, or — when an
    AudioRecorder is connected via audio_connect() — from the audio clock so
    that audio and video stay in sync.
    """

    def __init__(
        self,
        f_shape,
        fps,
        with_mask=True,
        is_mask=False,
        recording=False,
    ):
        """
        f_shape:   frame shape (height, width, channels).
        fps:       target frame rate used to space appended frames.
        with_mask: build a mask clip from the alpha channel when channels == 4.
        is_mask:   whether this clip itself is a mask.
        recording: initial recording state.
        """
        VideoClip.__init__(self)
        self.ismask = is_mask
        self.shape = f_shape
        self.size = f_shape[:2][::-1]  # moviepy expects (width, height)
        self.fps = fps
        self.images_starts = []  # start time of each stored frame
        self.sequence = []       # stored frames, in append order
        self.audio = None
        self.duration = 0
        self.end = self.duration
        self.recording = recording
        self.time = time.time()  # wall-clock time of the last appended frame
        self.frame = None        # last frame offered to frame_set()

        def make_frame(t):
            # Before any frame has been appended, render black.
            if len(self.sequence) == 0:
                return np.zeros(self.shape, dtype=np.uint8)
            index = self.find_image_index(t)
            return self.sequence[index][:, :, :3]

        if with_mask and (self.shape[2] == 4):

            self.mask = VideoClip(is_mask=True)

            def mask_make_frame(t):
                # Alpha channel scaled to [0, 1] for moviepy masks.
                index = self.find_image_index(t)
                return 1.0 * self.sequence[index][:, :, 3] / 255

            self.mask.make_frame = mask_make_frame
            self.mask.size = mask_make_frame(0).shape[:2][::-1]

        self.make_frame = make_frame

    def find_image_index(self, t):
        """Index of the frame whose start time is the latest one <= t."""
        return max([i for i in range(len(self.sequence)) if self.images_starts[i] <= t])

    def record(self, a):
        """Toggle recording; resets the frame timer when recording starts."""
        if a ^ self.recording:
            if a:
                self.recording = True
                self.time = time.time()
            else:
                self.recording = False

    def audio_connect(self, recorder):
        """Attach an AudioRecorder; its duration then drives video timing."""
        self.audio = recorder
        return self

    def audio_rec(self, l):
        """Forward raw audio chunks to the attached AudioRecorder."""
        self.audio.add_chunks(l)

    def frame_append(self, frame, rgb=False):
        """Append a frame; rgb=True converts it from RGB to BGR via OpenCV."""
        if rgb:
            fr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        else:
            fr = frame.copy()
        if len(self.sequence) == 0:
            # First frame starts the clip at t=0 and (re)arms the timer.
            self.sequence.append(fr)
            self.images_starts.append(0)
            self.time = time.time()
        elif self.recording:
            t = time.time()
            self.sequence.append(fr)
            if self.audio:
                # Follow the audio clock to keep A/V in sync.
                self.duration += self.audio.duration - self.duration
            else:
                self.duration += t - self.time
            self.images_starts.append(self.duration)
            self.time = t
        else:
            self.sequence.append(fr)
            # BUG FIX: the original referenced the bare name `fps`, which is
            # undefined in this scope (it was only an __init__ parameter);
            # the stored rate is self.fps.
            self.duration += 1.0 / self.fps - np.finfo(np.float32).eps
            self.images_starts.append(self.duration)

    def frame_set(self, fr, rgb=True):
        """Offer a frame. It is stored only while recording, when it differs
        from the previous frame, matches the configured shape, and at least
        one frame period has elapsed (audio clock if connected, else wall
        clock)."""
        if self.frame is None:
            self.frame_append(fr, rgb=rgb)
            self.frame = fr
        # BUG FIX: `fps` was an undefined bare name in this method too; use
        # self.fps. Also replaced `& (... is False)` with `and not`.
        elif self.recording and not np.array_equal(fr, self.frame):
            if fr.shape == self.shape:
                period = 1.0 / self.fps - np.finfo(np.float32).eps
                if self.audio:
                    if self.audio.duration - self.duration >= period:
                        self.frame_append(fr, rgb=rgb)
                elif time.time() - self.time >= period:
                    self.frame_append(fr, rgb=rgb)
            self.frame = fr

It's more efficient, because you can write your clips on the fly without excess memory usage — with sound too, and with the possibility of getting data from a pyaudio microphone stream without too much headache.

@keikoro keikoro added the images Related to ImageClip, or handling of images in general. label Oct 24, 2021
@wikiwen
Copy link

wikiwen commented Jun 6, 2022

I also need this feature of writing frames to a stream, like cv2.VideoWriter. Is there any code for this feature? Has arsserpentarium's code been merged into the master branch?

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
images Related to ImageClip, or handling of images in general.
Projects
None yet
Development

No branches or pull requests

4 participants