diff --git a/examples/stream_gui.py b/examples/stream_gui.py
index 45607b3..c174bf9 100644
--- a/examples/stream_gui.py
+++ b/examples/stream_gui.py
@@ -32,7 +32,7 @@ class CameraPlayer(QWidget):
     def __init__(self, rtsp_url_wide, rtsp_url_telephoto, camera: Camera):
         super().__init__()
         self.setWindowTitle("Reolink PTZ Streamer")
-        self.setGeometry(10, 10, 1900, 600)
+        self.setGeometry(10, 10, 2400, 900)
         self.setFocusPolicy(Qt.FocusPolicy.StrongFocus)
 
         self.camera = camera
diff --git a/examples/video_review_gui.py b/examples/video_review_gui.py
index bff9fd1..011d8eb 100644
--- a/examples/video_review_gui.py
+++ b/examples/video_review_gui.py
@@ -6,16 +6,17 @@
 import sys
 import re
 import datetime
-import queue
 import subprocess
+import argparse
 from configparser import RawConfigParser
 from datetime import datetime as dt, timedelta
 from reolinkapi import Camera
-from PyQt6.QtWidgets import QApplication, QVBoxLayout, QHBoxLayout, QWidget, QTableWidget, QTableWidgetItem, QPushButton, QLabel, QFileDialog, QHeaderView, QStyle, QSlider, QStyleOptionSlider, QSplitter
+from PyQt6.QtWidgets import QApplication, QVBoxLayout, QHBoxLayout, QWidget, QTableWidget, QTableWidgetItem, QPushButton, QLabel, QFileDialog, QHeaderView, QStyle, QSlider, QStyleOptionSlider, QSplitter, QTreeWidget, QTreeWidgetItem, QTreeWidgetItemIterator
 from PyQt6.QtMultimedia import QMediaPlayer, QAudioOutput
 from PyQt6.QtMultimediaWidgets import QVideoWidget
-from PyQt6.QtCore import Qt, QUrl, QTimer, QThread, pyqtSignal, QMutex
-from PyQt6.QtGui import QColor, QBrush, QFont
+from PyQt6.QtCore import Qt, QUrl, QTimer, QThread, pyqtSignal, QMutex, QWaitCondition
+from PyQt6.QtGui import QColor, QBrush, QFont, QIcon
+from collections import deque
 import urllib3
 
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -51,28 +52,61 @@ def decode_hex_to_flags(hex_value):
 
 def parse_filename(file_name):
     # Mp4Record_2024-08-12_RecM13_DST20240812_214255_214348_1F1E828_4DDA4D.mp4
+    # Mp4Record_2024-09-13-RecS09_DST20240907_084519_084612_0_55289080000000_307BC0.mp4
     # https://github.com/sven337/ReolinkLinux/wiki/Figuring-out-the-file-names#file-name-structure
-    pattern = r'.*?Mp4Record_(\d{4}-\d{2}-\d{2})_Rec[MS](\d)\d_DST(\d{8})_(\d{6})_(\d{6})_(\w{4,8})_(\w{4,8})\.mp4'
+    pattern = r'.*?Mp4Record_(\d{4}-\d{2}-\d{2})_Rec[MS](\d)(\d)_(DST)?(\d{8})_(\d{6})_(\d{6})'
+    v3_suffix = r'.*_(\w{4,8})_(\w{4,8})\.mp4'
+    v9_suffix = r'.*_(\d)_(\w{7})(\w{7})_(\w{4,8})\.mp4'
     match = re.match(pattern, file_name)
+    out = {}
+    version = 0
+
     if match:
         date = match.group(1)  # YYYY-MM-DD
-        channel = int(match.group(2))  # Mx as integer
-        start_date = match.group(3)  # YYYYMMDD
-        start_time = match.group(4)  # HHMMSS
-        end_time = match.group(5)  # HHMMSS
-        flags_hex = match.group(6)  # flags hex
-        file_size = match.group(7)  # second hexadecimal
-
+        channel = int(match.group(2))
+        version = int(match.group(3))  # version
+        start_date = match.group(5)  # YYYYMMDD
+        start_time = match.group(6)  # HHMMSS
+        end_time = match.group(7)  # HHMMSS
+
         # Combine date and start time into a datetime object
         start_datetime = datetime.datetime.strptime(f"{start_date} {start_time}", "%Y%m%d %H%M%S")
-        triggers = decode_hex_to_flags(flags_hex)
-
-        return {'start_datetime': start_datetime, 'channel': channel, 'end_time': end_time, 'triggers': triggers, 'file_size': file_size}
+        out = {'start_datetime': start_datetime, 'channel': channel, 'end_time': end_time }
     else:
         print("parse error")
         return None
+
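+    # The trailing fields depend on the version digit that follows RecM/RecS:
+    # v2/v3 names end in <flags hex>_<file size hex>, while v9 names insert an
+    # animal-type digit and a wider flags field (split into 7+7 hex digits)
+    # before the file size.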
print(f"v9 parse error for {file_name}") + return None + + animal_type = match.group(1) + flags_hex1 = match.group(2) + flags_hex2 = match.group(3) + file_size = int(match.group(4), 16) + + triggers = decode_hex_to_flags(flags_hex1) + + out.update({'animal_type' : animal_type, 'file_size' : file_size, 'triggers' : triggers }) + + elif version == 2 or version == 3: + match = re.match(v3_suffix, file_name) + if not match: + print(f"v3 parse error for {file_name}") + return None + + flags_hex = match.group(1) + file_size = int(match.group(2), 16) + + triggers = decode_hex_to_flags(flags_hex) + + out.update({'file_size' : file_size, 'triggers' : triggers }) + + return out class ClickableSlider(QSlider): def mousePressEvent(self, event): @@ -101,7 +135,7 @@ def pixelPosToRangeValue(self, pos): return QStyle.sliderValueFromPosition(self.minimum(), self.maximum(), pos - sliderMin, sliderMax - sliderMin, opt.upsideDown) class DownloadThread(QThread): - download_complete = pyqtSignal(str) + download_complete = pyqtSignal(str, bool) download_start = pyqtSignal(str) def __init__(self, download_queue, cam): @@ -109,42 +143,58 @@ def __init__(self, download_queue, cam): self.download_queue = download_queue self.cam = cam self.mutex = QMutex() + self.wait_condition = QWaitCondition() self.is_running = True def run(self): while self.is_running: + self.mutex.lock() + if len(self.download_queue) == 0: + self.wait_condition.wait(self.mutex) + self.mutex.unlock() + + if not self.is_running: + break + try: - fname, output_path = self.download_queue.get(timeout=1) + fname, output_path = self.download_queue.popleft() output_path = os.path.join(video_storage_dir, output_path) - if os.path.isfile(output_path): - print(f"File already exists: {output_path}") - self.download_complete.emit(output_path) + print(f"Downloading: {fname}") + self.download_start.emit(output_path) + resp = self.cam.get_file(fname, output_path=output_path) + if resp: + print(f"Download complete: {output_path}") + self.download_complete.emit(output_path, True) else: - print(f"Downloading: {fname}") - self.download_start.emit(output_path) - resp = self.cam.get_file(fname, output_path=output_path) - if resp: - print(f"Download complete: {output_path}") - self.download_complete.emit(output_path) - else: - print(f"Download failed: {fname}") - except queue.Empty: + print(f"Download failed: {fname}") + self.download_complete.emit(output_path, False) + except IndexError: pass def stop(self): self.mutex.lock() self.is_running = False self.mutex.unlock() + self.wait_condition.wakeAll() + + def add_to_queue(self, fname, output_path, left=False): + self.mutex.lock() + if left: + self.download_queue.appendleft((fname, output_path)) + else: + self.download_queue.append((fname, output_path)) + self.wait_condition.wakeOne() + self.mutex.unlock() class VideoPlayer(QWidget): - file_exists_signal = pyqtSignal(str) + file_exists_signal = pyqtSignal(str, bool) def __init__(self, video_files): super().__init__() self.setWindowTitle("Reolink Video Review GUI") self.cam = cam - self.download_queue = queue.Queue() + self.download_queue = deque() self.download_thread = DownloadThread(self.download_queue, self.cam) self.download_thread.download_start.connect(self.on_download_start) self.download_thread.download_complete.connect(self.on_download_complete) @@ -160,23 +210,27 @@ def __init__(self, video_files): self.media_player.setPlaybackRate(1.5) # Create table widget to display video files - self.video_table = QTableWidget() - self.video_table.setColumnCount(9) - 
+    def add_to_queue(self, fname, output_path, left=False):
+        self.mutex.lock()
+        if left:
+            self.download_queue.appendleft((fname, output_path))
+        else:
+            self.download_queue.append((fname, output_path))
+        self.wait_condition.wakeOne()
+        self.mutex.unlock()
 
 class VideoPlayer(QWidget):
-    file_exists_signal = pyqtSignal(str)
+    file_exists_signal = pyqtSignal(str, bool)
 
     def __init__(self, video_files):
         super().__init__()
         self.setWindowTitle("Reolink Video Review GUI")
         self.cam = cam
-        self.download_queue = queue.Queue()
+        self.download_queue = deque()
         self.download_thread = DownloadThread(self.download_queue, self.cam)
         self.download_thread.download_start.connect(self.on_download_start)
         self.download_thread.download_complete.connect(self.on_download_complete)
@@ -160,23 +210,27 @@ def __init__(self, video_files):
         self.media_player.setPlaybackRate(1.5)
 
         # Create table widget to display video files
-        self.video_table = QTableWidget()
-        self.video_table.setColumnCount(9)
-        self.video_table.setHorizontalHeaderLabels(["Video Path", "Start Datetime", "End Time", "Channel", "Person", "Vehicle", "Pet", "Motion", "Timer" ])
-        self.video_table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeMode.Interactive)
-        self.video_table.setSortingEnabled(True)
-        self.video_table.cellClicked.connect(self.play_video)
+        self.video_tree = QTreeWidget()
+        self.video_tree.setColumnCount(10)
+        self.video_tree.setHeaderLabels(["Status", "Video Path", "Start Datetime", "End Time", "Channel", "Person", "Vehicle", "Pet", "Motion", "Timer"])
+        self.video_tree.setSortingEnabled(True)
+        self.video_tree.itemClicked.connect(self.play_video)
 
         # Set smaller default column widths
-        self.video_table.setColumnWidth(0, 120)  # Video Path
-        self.video_table.setColumnWidth(1, 130)  # Start Datetime
-        self.video_table.setColumnWidth(2, 80)  # End Time
-        self.video_table.setColumnWidth(3, 35)  # Channel
-        self.video_table.setColumnWidth(4, 35)  # Person
-        self.video_table.setColumnWidth(5, 35)  # Vehicle
-        self.video_table.setColumnWidth(6, 35)  # Pet
-        self.video_table.setColumnWidth(7, 35)  # Motion
-        self.video_table.setColumnWidth(8, 30)  # Timer
+        self.video_tree.setColumnWidth(0, 35)  # Status
+        self.video_tree.setColumnWidth(1, 120)  # Video Path
+        self.video_tree.setColumnWidth(2, 130)  # Start Datetime
+        self.video_tree.setColumnWidth(3, 70)  # End Time
+        self.video_tree.setColumnWidth(4, 35)  # Channel
+        self.video_tree.setColumnWidth(5, 35)  # Person
+        self.video_tree.setColumnWidth(6, 35)  # Vehicle
+        self.video_tree.setColumnWidth(7, 35)  # Pet
+        self.video_tree.setColumnWidth(8, 35)  # Motion
+        self.video_tree.setColumnWidth(9, 30)  # Timer
+
+        self.video_tree.setIndentation(10)
+
+        QIcon.setThemeName("Adwaita")
 
         # Create open button to select video files
         self.open_button = QPushButton("Open Videos")
@@ -188,7 +242,11 @@ def __init__(self, video_files):
 
         self.mpv_button = QPushButton("MPV")
         self.mpv_button.clicked.connect(self.open_in_mpv)
-
+
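+        # Only useful for sub-stream (RecS) recordings; play_video() re-enables
+        # it when such a recording is selected.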
+        self.get_highres_button = QPushButton("GetHighRes")
+        self.get_highres_button.clicked.connect(self.get_highres_stream_for_file)
+        self.get_highres_button.setEnabled(False)  # Disable by default
+
         # Create seek slider
         self.seek_slider = ClickableSlider(Qt.Orientation.Horizontal)
         self.seek_slider.setRange(0, 0)
@@ -213,7 +271,7 @@ def __init__(self, video_files):
         # Left side (table and open button)
         left_widget = QWidget()
         left_layout = QVBoxLayout(left_widget)
-        left_layout.addWidget(self.video_table)
+        left_layout.addWidget(self.video_tree)
         left_layout.addWidget(self.open_button)
 
         splitter.addWidget(left_widget)
@@ -229,6 +287,7 @@ def __init__(self, video_files):
         control_layout.addWidget(self.play_button)
         control_layout.addWidget(self.seek_slider)
         control_layout.addWidget(self.mpv_button)
+        control_layout.addWidget(self.get_highres_button)
         controls_layout.addLayout(control_layout)
 
         speed_layout = QHBoxLayout()
@@ -251,103 +310,173 @@ def __init__(self, video_files):
     def add_initial_videos(self, video_files):
         for video_path in video_files:
             self.add_video(video_path)
-        self.video_table.sortItems(1, Qt.SortOrder.DescendingOrder)
+        self.video_tree.sortItems(2, Qt.SortOrder.DescendingOrder)
+#        self.video_tree.expandAll()
 
     def open_videos(self):
        file_dialog = QFileDialog(self)
         file_dialog.setNameFilters(["Videos (*.mp4 *.avi *.mov)"])
         file_dialog.setFileMode(QFileDialog.FileMode.ExistingFiles)
         if file_dialog.exec():
-            self.video_table.setSortingEnabled(False)
+            self.video_tree.setSortingEnabled(False)
             for file in file_dialog.selectedFiles():
                 self.add_video(os.path.basename(file))
-            self.video_table.setSortingEnabled(True)
-            self.video_table.sortItems(1, Qt.SortOrder.DescendingOrder)
+            self.video_tree.setSortingEnabled(True)
+            self.video_tree.sortItems(2, Qt.SortOrder.DescendingOrder)
+#            self.video_tree.expandAll()
 
     def add_video(self, video_path):
         # We are passed the camera file name, e.g. Mp4Record/2024-08-12/RecM13_DST20240812_214255_214348_1F1E828_4DDA4D.mp4
         file_path = path_name_from_camera_path(video_path)
         base_file_name = file_path
         parsed_data = parse_filename(file_path)
-        if parsed_data:
-            row_position = self.video_table.rowCount()
-            self.video_table.insertRow(row_position)
-            start_datetime_str = parsed_data['start_datetime'].strftime("%Y-%m-%d %H:%M:%S")
-            start_datetime_item = QTableWidgetItem(start_datetime_str)
-            start_datetime_item.setData(Qt.ItemDataRole.UserRole, parsed_data['start_datetime'])
-
-            # Create the item for the first column with the base file name
-            file_name_item = QTableWidgetItem(base_file_name)
+        if not parsed_data:
+            print(f"Could not parse file {video_path}")
+            return
+
+        start_datetime = parsed_data['start_datetime']
+        channel = parsed_data['channel']
+
+        end_time = datetime.datetime.strptime(parsed_data['end_time'], "%H%M%S")
+        end_time_str = end_time.strftime("%H:%M:%S")
+
+        video_item = QTreeWidgetItem()
+        video_item.setText(0, "")  # Status
+        video_item.setTextAlignment(0, Qt.AlignmentFlag.AlignCenter)
+        video_item.setText(1, base_file_name)
+        video_item.setText(2, start_datetime.strftime("%Y-%m-%d %H:%M:%S"))
+        video_item.setData(2, Qt.ItemDataRole.UserRole, parsed_data['start_datetime'])
+        video_item.setText(3, end_time_str)
+        video_item.setText(4, str(channel))
+        video_item.setText(5, "✓" if parsed_data['triggers']['ai_pd'] else "")
+        video_item.setText(6, "✓" if parsed_data['triggers']['ai_vd'] else "")
+        video_item.setText(7, "✓" if parsed_data['triggers']['ai_ad'] else "")
+        video_item.setText(8, "✓" if parsed_data['triggers']['is_motion_record'] else "")
+        video_item.setText(9, "✓" if parsed_data['triggers']['is_schedule_record'] else "")
+
+        if parsed_data['triggers']['ai_other']:
+            print(f"File {file_path} has ai_other flag!")
+
+        video_item.setToolTip(1, base_file_name)
+        # Set the style for queued status
+        grey_color = QColor(200, 200, 200)  # Light grey
+        video_item.setForeground(1, QBrush(grey_color))
+        font = QFont()
+        font.setItalic(True)
+        video_item.setFont(1, font)
+
+        # Make the fields non-editable
+        iterator = QTreeWidgetItemIterator(self.video_tree)
+        while iterator.value():
+            item = iterator.value()
+            item.setFlags(item.flags() & ~Qt.ItemFlag.ItemIsEditable)
+            iterator += 1
+
+        # Find a potentially pre-existing channel0 item for this datetime, if so, add as a child
+        # This lets channel1 appear as a child, but also main & sub videos appear in the same group
+        channel_0_item = self.find_channel_0_item(start_datetime)
+        if channel_0_item:
+            channel_0_item.addChild(video_item)
+        else:
+            self.video_tree.addTopLevelItem(video_item)
 
-            # Set the full path as tooltip
-            file_name_item.setToolTip(base_file_name)
+        output_path = os.path.join(video_storage_dir, base_file_name)
+        expected_size = parsed_data['file_size']
 
-            # Set the style for queued status
-            grey_color = QColor(200, 200, 200)  # Light grey
-            file_name_item.setForeground(QBrush(grey_color))
-            font = QFont()
-            font.setItalic(True)
-            file_name_item.setFont(font)
-
-            self.video_table.setItem(row_position, 0, file_name_item)
-            self.video_table.setItem(row_position, 1, start_datetime_item)
-            self.video_table.setItem(row_position, 2, QTableWidgetItem(parsed_data['end_time']))
-            self.video_table.setItem(row_position, 3, QTableWidgetItem(f"{parsed_data['channel']}"))
-
-            # Set individual trigger flags
-            self.video_table.setItem(row_position, 4, QTableWidgetItem("✓" if parsed_data['triggers']['ai_pd'] else ""))
-            self.video_table.setItem(row_position, 5, QTableWidgetItem("✓" if parsed_data['triggers']['ai_vd'] else ""))
-            self.video_table.setItem(row_position, 6, QTableWidgetItem("✓" if parsed_data['triggers']['ai_ad'] else ""))
-            self.video_table.setItem(row_position, 7, QTableWidgetItem("✓" if parsed_data['triggers']['is_motion_record'] else ""))
-            self.video_table.setItem(row_position, 8, QTableWidgetItem("✓" if parsed_data['triggers']['is_schedule_record'] else ""))
-
-            if parsed_data['triggers']['ai_other']:
-                print(f"File {file_path} has ai_other flag!")
-
-            # Make the fields non-editable
-            for column in range(self.video_table.columnCount()):
-                item = self.video_table.item(row_position, column)
-                item.setFlags(item.flags() & ~Qt.ItemFlag.ItemIsEditable)
-
-            output_path = os.path.join(video_storage_dir, base_file_name)
-            if os.path.isfile(output_path):
-                self.file_exists_signal.emit(output_path)
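+        # Compare the on-disk size with the size encoded in the file name so
+        # that partial downloads are detected and fetched again.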
+        need_download = True
+        if os.path.isfile(output_path):
+            actual_size = os.path.getsize(output_path)
+            if actual_size == expected_size:
+                need_download = False
+                self.file_exists_signal.emit(output_path, True)
             else:
-                # Add to download queue
-                self.download_queue.put((video_path, base_file_name))
+                print(f"File size mismatch for {output_path}. Expected: {expected_size}, Actual: {actual_size}. Downloading again")
+
+        if need_download:
+            video_item.setIcon(1, self.style().standardIcon(QStyle.StandardPixmap.SP_CommandLink))
+            self.download_thread.add_to_queue(video_path, base_file_name)
+
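+    # Grouping heuristic: recordings of the same event (other channel, or main
+    # vs. sub stream) are assumed to fall in the same 10-second bucket of start
+    # times, so later arrivals become children of the first matching item.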
+    def find_channel_0_item(self, datetime_obj):
+        # Truncate seconds to nearest 10
+        truncated_seconds = datetime_obj.second - (datetime_obj.second % 10)
+        truncated_datetime = datetime_obj.replace(second=truncated_seconds)
+
+        for i in range(self.video_tree.topLevelItemCount()):
+            item = self.video_tree.topLevelItem(i)
+            item_datetime = item.data(2, Qt.ItemDataRole.UserRole)
+            item_truncated = item_datetime.replace(second=item_datetime.second - (item_datetime.second % 10))
+            if item_truncated == truncated_datetime:
+                return item
+        return None
+
+    def find_item_by_path(self, path):
+        iterator = QTreeWidgetItemIterator(self.video_tree)
+        while iterator.value():
+            item = iterator.value()
+            text = item.text(1)
+            if text == path:
+                return item
+            iterator += 1
+        print(f"Could not find item by path {path}")
+        return None
+
+    def on_download_complete(self, video_path, success):
+        item = self.find_item_by_path(os.path.basename(video_path))
+        if not item:
+            print(f"on_download_complete {video_path} did not find item?!")
+            return
+        item.setForeground(1, QBrush(QColor(0, 0, 0)))  # Black color for normal text
+        font = item.font(1)
+        font.setItalic(False)
+        font.setBold(False)
+        item.setFont(1, font)
+        if success:
+            if "RecS" in item.text(1):
+                # One day (hopefully) offer the option to download the
+                # high-res version. This is not trivial because we have to
+                # re-do a camera search for the relevant time period and
+                # match based on start and end dates (+/- one second in my
+                # experience).
+                # For now simply display that this is low-res.
+                item.setText(0, "sub")
+            item.setIcon(1, QIcon())
         else:
-            print(f"Could not parse file {video_path}")
-
-    def on_download_complete(self, video_path):
-        for row in range(self.video_table.rowCount()):
-            if self.video_table.item(row, 0).text() == os.path.basename(video_path):
-                file_name_item = self.video_table.item(row, 0)
-                file_name_item.setForeground(QBrush(QColor(0, 0, 0)))  # Black color for normal text
-                font = QFont()
-                font.setItalic(False)
-                font.setBold(False)
-                file_name_item.setFont(font)
-                break
-
+            item.setIcon(1, self.style().standardIcon(QStyle.StandardPixmap.SP_MessageBoxCritical))
+
+
     def on_download_start(self, video_path):
-        for row in range(self.video_table.rowCount()):
-            if self.video_table.item(row, 0).text() == os.path.basename(video_path):
-                file_name_item = self.video_table.item(row, 0)
-                grey_color = QColor(200, 200, 200)  # Light grey
-                file_name_item.setForeground(QBrush(grey_color))
-                font = QFont()
-                font.setBold(True)
-                file_name_item.setFont(font)
-                break
+        item = self.find_item_by_path(os.path.basename(video_path))
+        if item:
+            grey_color = QColor(200, 200, 200)  # Light grey
+            item.setForeground(1, QBrush(grey_color))
+            font = item.font(1)
+            font.setBold(True)
+            item.setFont(1, font)
+            item.setIcon(1, QIcon.fromTheme("emblem-synchronizing"))
+        else:
+            print(f"Cannot find item for {video_path}")
 
-    def play_video(self, row, column):
-        file_name_item = self.video_table.item(row, 0)
-        video_path = os.path.join(video_storage_dir, file_name_item.text())
+    def play_video(self, file_name_item, column):
+        video_path = os.path.join(video_storage_dir, file_name_item.text(1))
 
-        if file_name_item.font().italic() or file_name_item.foreground().color().lightness() >= 200:
-            print(f"Video {video_path} is not yet downloaded. Please wait.")
+        if file_name_item.font(1).italic() or file_name_item.foreground(1).color().lightness() >= 200:
+            print(f"Video {video_path} is not yet downloaded. Moving it to top of queue. Please wait for download.")
+            # Find the item in the download_queue that matches the base file name
+            found_item = None
+            for item in list(self.download_queue):
+                if item[1] == file_name_item.text(1):
+                    found_item = item
+                    break
+
+            if found_item:
+                # Remove the item from its current position in the queue
+                self.download_queue.remove(found_item)
+                # Re-add it at the front of the queue so it is downloaded next
+                self.download_thread.add_to_queue(*found_item, left=True)
             return
 
+        # Enable/disable GetHighRes button based on whether it's a sub stream
+        self.get_highres_button.setEnabled("RecS" in file_name_item.text(1))
+
         print(f"Playing video: {video_path}")
         url = QUrl.fromLocalFile(video_path)
         self.media_player.setSource(url)
@@ -362,6 +491,28 @@ def start_playback():
         # Timer needed to be able to play at seek offset in the video, otherwise setPosition seems ignored
         QTimer.singleShot(20, start_playback)
 
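+    # The matching main-stream recording starts/ends within about a second of
+    # the selected sub-stream file, so widen the search window by one second on
+    # each side and add whatever the camera returns.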
+    def get_highres_stream_for_file(self):
+        current_item = self.video_tree.currentItem()
+        if not current_item or "RecS" not in current_item.text(1):
+            return
+
+        parsed_data = parse_filename(current_item.text(1))
+        if not parsed_data:
+            print(f"Could not parse file {current_item.text(1)}")
+            return
+
+        start_time = parsed_data['start_datetime'] - timedelta(seconds=1)
+        end_time = datetime.datetime.strptime(f"{parsed_data['start_datetime'].strftime('%Y%m%d')} {parsed_data['end_time']}", "%Y%m%d %H%M%S") + timedelta(seconds=1)
+
+        main_files = self.cam.get_motion_files(start=start_time, end=end_time, streamtype='main', channel=parsed_data['channel'])
+
+        if main_files:
+            for main_file in main_files:
+                self.add_video(main_file['filename'])
+            self.video_tree.sortItems(2, Qt.SortOrder.DescendingOrder)
+        else:
+            print(f"No main stream file found for {current_item.text(1)}")
+
     def play_pause(self):
         if self.media_player.playbackState() == QMediaPlayer.PlaybackState.PlayingState:
             self.media_player.pause()
@@ -408,7 +559,9 @@ def open_in_mpv(self):
 
     def closeEvent(self, event):
         self.download_thread.stop()
-        self.download_thread.wait()
+        self.download_thread.wait(1000)
+        self.download_thread.terminate()
+        self.cam.logout()
         super().closeEvent(event)
 
 def read_config(props_path: str) -> dict:
@@ -430,13 +583,19 @@ def read_config(props_path: str) -> dict:
 
 def signal_handler(sig, frame):
     print("Exiting the application...")
-    sys.exit(0)
+    cam.logout()
+    QApplication.quit()
+    sys.exit(0)
 
 
 if __name__ == '__main__':
     signal.signal(signal.SIGINT, signal_handler)
-# Read in your ip, username, & password
-# (NB! you'll likely have to create this file. See tests/test_camera.py for details on structure)
+
+    parser = argparse.ArgumentParser(description="Reolink Video Review GUI")
+    parser.add_argument('--sub', action='store_true', help="Search for sub channel instead of main channel")
+    parser.add_argument('files', nargs='*', help="Optional video file names to process")
+    args = parser.parse_args()
+
     config = read_config('camera.cfg')
 
     ip = config.get('camera', 'ip')
@@ -449,14 +608,19 @@ def signal_handler(sig, frame):
     start = dt.combine(dt.now(), dt.min.time())
     end = dt.now()
-    processed_motions = cam.get_motion_files(start=start, end=end, streamtype='main', channel=0)
-    processed_motions += cam.get_motion_files(start=start, end=end, streamtype='main', channel=1)
+
+    streamtype = 'sub' if args.sub else 'main'
+    processed_motions = cam.get_motion_files(start=start, end=end, streamtype=streamtype, channel=0)
+    processed_motions += cam.get_motion_files(start=start, end=end, streamtype=streamtype, channel=1)
 
     start = dt.now() - timedelta(days=1)
     end = dt.combine(start, dt.max.time())
-    processed_motions += cam.get_motion_files(start=start, end=end, streamtype='main', channel=0)
-    processed_motions += cam.get_motion_files(start=start, end=end, streamtype='main', channel=1)
+    processed_motions += cam.get_motion_files(start=start, end=end, streamtype=streamtype, channel=0)
+    processed_motions += cam.get_motion_files(start=start, end=end, streamtype=streamtype, channel=1)
+
+    if len(processed_motions) == 0:
+        print("Camera did not return any video?!")
 
     video_files = []
     for i, motion in enumerate(processed_motions):
@@ -464,9 +628,9 @@ def signal_handler(sig, frame):
         print("Processing %s" % (fname))
         video_files.append(fname)
 
-    video_files.extend(sys.argv[1:])
+    video_files.extend([os.path.basename(file) for file in args.files])
 
     app = QApplication(sys.argv)
     player = VideoPlayer(video_files)
-    player.resize(1900, 1000)
+    player.resize(2400, 1000)
     player.show()
     sys.exit(app.exec())