From f68c34c8081a54f31f39d53151a635b7d39afef9 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 13 Feb 2026 13:58:34 +0100 Subject: [PATCH 1/9] Add PyTorch support and refactor check_install Refactor check_install.py to support both PyTorch and TensorFlow backends and improve temporary file handling. Introduces TMP_DIR, MODELS_FOLDER, and separate run_pytorch_test/run_tensorflow_test helpers; PyTorch now exports and benchmarks an exported .pt checkpoint, TensorFlow model download logic is preserved. Replaces --nodisplay with --display, centralizes video download and assertions, tightens error handling for downloads, and ensures proper cleanup of temporary files. Also updates imports (urllib.error, export_modelzoo_model) and updates backend availability checks to require at least one backend. --- dlclive/check_install/check_install.py | 174 +++++++++++++++++-------- 1 file changed, 118 insertions(+), 56 deletions(-) diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index ae3e569..1f7dc82 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -8,101 +8,163 @@ import argparse import shutil import warnings +import urllib.error from pathlib import Path from dlclibrary.dlcmodelzoo.modelzoo_download import download_huggingface_model -import dlclive from dlclive.utils import download_file from dlclive.benchmark import benchmark_videos from dlclive.engine import Engine +from dlclive.modelzoo.pytorch_model_zoo_export import export_modelzoo_model from dlclive.utils import get_available_backends MODEL_NAME = "superanimal_quadruped" SNAPSHOT_NAME = "snapshot-700000.pb" +TMP_DIR = Path(__file__).parent / "dlc-live-tmp" +MODELS_FOLDER = TMP_DIR / "test_models" +TORCH_MODEL = "resnet_50" +TORCH_CONFIG = { + "checkpoint": MODELS_FOLDER / f"exported_quadruped_{TORCH_MODEL}.pt", + "super_animal": "superanimal_quadruped", +} +TF_MODEL_DIR = TMP_DIR / "DLC_Dog_resnet_50_iteration-0_shuffle-0" -def main(): - parser = argparse.ArgumentParser( - description="Test DLC-Live installation by downloading and evaluating a demo DLC project!" - ) - parser.add_argument( - "--nodisplay", - action="store_false", - help="Run the test without displaying tracking", - ) - args = parser.parse_args() - display = args.nodisplay - - if not display: - print("Running without displaying video") +MODELS_FOLDER.mkdir(parents=True, exist_ok=True) - # make temporary directory - print("\nCreating temporary directory...\n") - tmp_dir = Path(dlclive.__file__).parent / "check_install" / "dlc-live-tmp" - tmp_dir.mkdir(mode=0o775, exist_ok=True) - video_file = str(tmp_dir / "dog_clip.avi") - model_dir = tmp_dir / "DLC_Dog_resnet_50_iteration-0_shuffle-0" +def run_pytorch_test(video_file: str, display: bool = False): + if Engine.PYTORCH not in get_available_backends(): + raise NotImplementedError( + "PyTorch backend is not available. Please ensure PyTorch is installed to run the PyTorch test." 
+ ) + # Download model from the DeepLabCut Model Zoo + export_modelzoo_model( + export_path=TORCH_CONFIG["checkpoint"], + super_animal=TORCH_CONFIG["super_animal"], + model_name=TORCH_MODEL, + ) + assert TORCH_CONFIG["checkpoint"].exists(), ( + f"Failed to export {TORCH_CONFIG['super_animal']} model" + ) + assert TORCH_CONFIG["checkpoint"].stat().st_size > 0, ( + f"Exported {TORCH_CONFIG['super_animal']} model is empty" + ) + benchmark_videos( + model_path=str(TORCH_CONFIG["checkpoint"]), + model_type="pytorch", + video_path=video_file, + display=display, + resize=0.5, + pcutoff=0.25, + pixels=1000, + ) - # download dog test video from github: - # Use raw.githubusercontent.com for direct file access - if not Path(video_file).exists(): - print(f"Downloading Video to {video_file}") - url_link = "https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-live/master/check_install/dog_clip.avi" - try: - download_file(url_link, video_file) - except (urllib.error.URLError, IOError) as e: - raise RuntimeError(f"Failed to download video file: {e}") from e - else: - print(f"Video file already exists at {video_file}, skipping download.") - # download model from the DeepLabCut Model Zoo +def run_tensorflow_test(video_file: str, display: bool = False): + if Engine.TENSORFLOW not in get_available_backends(): + raise NotImplementedError( + "TensorFlow backend is not available. Please ensure TensorFlow is installed to run the TensorFlow test." + ) + model_dir = TF_MODEL_DIR + model_dir.mkdir(parents=True, exist_ok=True) + assert model_dir.exists(), f"Model directory {model_dir} does not exist" if Path(model_dir / SNAPSHOT_NAME).exists(): print("Model already downloaded, using cached version") else: - print("Downloading superanimal_quadruped model from the DeepLabCut Model Zoo...") - download_huggingface_model(MODEL_NAME, model_dir) + print( + "Downloading superanimal_quadruped model from the DeepLabCut Model Zoo..." + ) + download_huggingface_model(MODEL_NAME, str(model_dir)) - # assert these things exist so we can give informative error messages - assert Path(video_file).exists(), f"Missing video file {video_file}" - assert Path( - model_dir / SNAPSHOT_NAME - ).exists(), f"Missing model file {model_dir / SNAPSHOT_NAME}" + assert Path(model_dir / SNAPSHOT_NAME).exists(), ( + f"Missing model file {model_dir / SNAPSHOT_NAME}" + ) - # run benchmark videos - print("\n Running inference...\n") benchmark_videos( model_path=str(model_dir), - model_type="base" if Engine.from_model_path(model_dir) == Engine.TENSORFLOW else "pytorch", + model_type="base", video_path=video_file, display=display, resize=0.5, - pcutoff=0.25 + pcutoff=0.25, + pixels=1000, ) - # deleting temporary files - print("\n Deleting temporary files...\n") + +def main(): + tmp_dir = None try: - shutil.rmtree(tmp_dir) - except PermissionError: - warnings.warn( - f"Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!" + parser = argparse.ArgumentParser( + description="Test DLC-Live installation by downloading and evaluating a demo DLC project!" 
+ ) + parser.add_argument( + "--display", + action="store_true", + help="Run the test and display tracking", ) + args = parser.parse_args() + display = args.display + + if not display: + print("Running without displaying video") + + # make temporary directory + print("\nCreating temporary directory...\n") + tmp_dir = TMP_DIR + tmp_dir.mkdir(mode=0o775, exist_ok=True) + + video_file = str(tmp_dir / "dog_clip.avi") + + # download dog test video from github: + # Use raw.githubusercontent.com for direct file access + if not Path(video_file).exists(): + print(f"Downloading Video to {video_file}") + url_link = "https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-live/master/check_install/dog_clip.avi" + try: + download_file(url_link, video_file) + except (urllib.error.URLError, IOError) as e: + raise RuntimeError(f"Failed to download video file: {e}") from e + else: + print(f"Video file already exists at {video_file}, skipping download.") + + # assert these things exist so we can give informative error messages + assert Path(video_file).exists(), f"Missing video file {video_file}" + + for backend in get_available_backends(): + if backend == Engine.PYTORCH: + print("\nRunning PyTorch test...\n") + run_pytorch_test(video_file, display=display) + elif backend == Engine.TENSORFLOW: + print("\nRunning TensorFlow test...\n") + run_tensorflow_test(video_file, display=display) + else: + warnings.warn( + f"Unrecognized backend {backend}, skipping...", UserWarning + ) + + finally: + # deleting temporary files + print("\n Deleting temporary files...\n") + try: + if tmp_dir is not None and tmp_dir.exists(): + shutil.rmtree(tmp_dir) + except PermissionError: + warnings.warn( + f"Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!" + ) - print("\nDone!\n") + print("\nDone!\n") if __name__ == "__main__": - # Get available backends (emits a warning if neither TensorFlow nor PyTorch is installed) available_backends: list[Engine] = get_available_backends() print(f"Available backends: {[b.value for b in available_backends]}") - - # TODO: JR add support for PyTorch in check_install.py (requires some exported pytorch model to be downloaded) - if not Engine.TENSORFLOW in available_backends: + if len(available_backends) == 0: raise NotImplementedError( - "TensorFlow is not installed. Currently check_install.py only supports testing the TensorFlow installation." + "Neither TensorFlow nor PyTorch is installed. Please install at least one of these frameworks to run the installation test." ) main() From 6b5a95ca13698f34e6eded3f128d56201b1717a1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 13 Feb 2026 13:59:56 +0100 Subject: [PATCH 2/9] Add single_animal option to benchmark_videos Expose a single_animal flag on benchmark_videos (default False) and forward it to the underlying analysis call. This allows benchmarking to run in single-animal mode when using DeepLabCut-live exported models. 
--- dlclive/benchmark.py | 112 ++++++++++++++++++++++++------------------- 1 file changed, 64 insertions(+), 48 deletions(-) diff --git a/dlclive/benchmark.py b/dlclive/benchmark.py index 8b4a0b4..47b5e3b 100644 --- a/dlclive/benchmark.py +++ b/dlclive/benchmark.py @@ -10,8 +10,10 @@ import sys import time import warnings +from typing import TYPE_CHECKING from pathlib import Path - +import argparse +import os import colorcet as cc import cv2 import numpy as np @@ -23,10 +25,15 @@ from dlclive import DLCLive from dlclive import VERSION -from dlclive import __file__ as dlcfile from dlclive.engine import Engine from dlclive.utils import decode_fourcc +if TYPE_CHECKING: + try: + import tensorflow + except ImportError: + tensorflow = None + def download_benchmarking_data( target_dir=".", @@ -49,17 +56,20 @@ def download_benchmarking_data( if os.path.exists(zip_path): print(f"{zip_path} already exists. Skipping download.") else: + def show_progress(count, block_size, total_size): pbar.update(block_size) print(f"Downloading the benchmarking data from {url} ...") pbar = tqdm(unit="B", total=0, position=0, desc="Downloading") - filename, _ = urllib.request.urlretrieve(url, filename=zip_path, reporthook=show_progress) + filename, _ = urllib.request.urlretrieve( + url, filename=zip_path, reporthook=show_progress + ) pbar.close() print(f"Extracting {zip_path} to {target_dir} ...") - with zipfile.ZipFile(zip_path, 'r') as zip_ref: + with zipfile.ZipFile(zip_path, "r") as zip_ref: zip_ref.extractall(target_dir) @@ -81,6 +91,7 @@ def benchmark_videos( cmap="bmy", save_poses=False, save_video=False, + single_animal=False, ): """Analyze videos using DeepLabCut-live exported models. Analyze multiple videos and/or multiple options for the size of the video @@ -168,7 +179,7 @@ def benchmark_videos( im_size_out = [] for i in range(len(resize)): - print(f"\nRun {i+1} / {len(resize)}\n") + print(f"\nRun {i + 1} / {len(resize)}\n") this_inf_times, this_im_size, meta = benchmark( model_path=model_path, @@ -188,6 +199,7 @@ def benchmark_videos( save_poses=save_poses, save_video=save_video, save_dir=output, + single_animal=single_animal, ) inf_times.append(this_inf_times) @@ -257,7 +269,7 @@ def get_system_info() -> dict: dev_type = "GPU" dev = [torch.cuda.get_device_name(torch.cuda.current_device())] else: - from cpuinfo import get_cpu_info + from cpuinfo import get_cpu_info # noqa: F401 dev_type = "CPU" dev = get_cpu_info() @@ -275,9 +287,7 @@ def get_system_info() -> dict: } -def save_inf_times( - sys_info, inf_times, im_size, model=None, meta=None, output=None -): +def save_inf_times(sys_info, inf_times, im_size, model=None, meta=None, output=None): """Save inference time data collected using :function:`benchmark` with system information to a pickle file. 
This is primarily used through :function:`benchmark_videos` @@ -346,6 +356,7 @@ def save_inf_times( return True + def benchmark( model_path: str, model_type: str, @@ -357,8 +368,8 @@ def benchmark( single_animal: bool = True, cropping: list[int] | None = None, dynamic: tuple[bool, float, int] = (False, 0.5, 10), - n_frames: int =1000, - print_rate: bool=False, + n_frames: int = 1000, + print_rate: bool = False, precision: str = "FP32", display: bool = True, pcutoff: float = 0.5, @@ -434,7 +445,10 @@ def benchmark( if not cap.isOpened(): print(f"Error: Could not open video file {video_path}") return - im_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))) + im_size = ( + int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), + ) if pixels is not None: resize = np.sqrt(pixels / (im_size[0] * im_size[1])) @@ -492,9 +506,7 @@ def benchmark( total_n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) n_frames = int( - n_frames - if (n_frames > 0) and n_frames < total_n_frames - else total_n_frames + n_frames if (n_frames > 0) and n_frames < total_n_frames else total_n_frames ) iterator = range(n_frames) if print_rate or display else tqdm(range(n_frames)) for _ in iterator: @@ -510,7 +522,7 @@ def benchmark( start_time = time.perf_counter() if frame_index == 0: - pose = dlc_live.init_inference(frame) # Loads model + pose = dlc_live.init_inference(frame) # Loads model else: pose = dlc_live.get_pose(frame) @@ -519,7 +531,9 @@ def benchmark( times.append(inf_time) if print_rate: - print("Inference rate = {:.3f} FPS".format(1 / inf_time), end="\r", flush=True) + print( + "Inference rate = {:.3f} FPS".format(1 / inf_time), end="\r", flush=True + ) if save_video: draw_pose_and_write( @@ -531,19 +545,17 @@ def benchmark( pcutoff=pcutoff, display_radius=display_radius, draw_keypoint_names=draw_keypoint_names, - vwriter=vwriter + vwriter=vwriter, ) frame_index += 1 if print_rate: - print("Mean inference rate: {:.3f} FPS".format(np.mean(1 / np.array(times)[1:]))) + print( + "Mean inference rate: {:.3f} FPS".format(np.mean(1 / np.array(times)[1:])) + ) - metadata = _get_metadata( - video_path=video_path, - cap=cap, - dlc_live=dlc_live - ) + metadata = _get_metadata(video_path=video_path, cap=cap, dlc_live=dlc_live) cap.release() @@ -558,19 +570,21 @@ def benchmark( else: individuals = [] n_individuals = len(individuals) or 1 - save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, timestamp=timestamp) + save_poses_to_files( + video_path, save_dir, n_individuals, bodyparts, poses, timestamp=timestamp + ) return times, im_size, metadata def setup_video_writer( - video_path:str, - save_dir:str, - timestamp:str, - num_keypoints:int, - cmap:str, - fps:float, - frame_size:tuple[int, int], + video_path: str, + save_dir: str, + timestamp: str, + num_keypoints: int, + cmap: str, + fps: float, + frame_size: tuple[int, int], ): # Set colors and convert to RGB cmap_colors = getattr(cc, cmap) @@ -582,7 +596,9 @@ def setup_video_writer( # Define output video path video_path = Path(video_path) video_name = video_path.stem # filename without extension - output_video_path = Path(save_dir) / f"{video_name}_DLCLIVE_LABELLED_{timestamp}.mp4" + output_video_path = ( + Path(save_dir) / f"{video_name}_DLCLIVE_LABELLED_{timestamp}.mp4" + ) # Get video writer setup fourcc = cv2.VideoWriter_fourcc(*"mp4v") @@ -595,6 +611,7 @@ def setup_video_writer( return colors, vwriter + def draw_pose_and_write( frame: np.ndarray, pose: np.ndarray, @@ -611,7 +628,9 @@ 
def draw_pose_and_write( if resize is not None and resize != 1.0: # Resize the frame - frame = cv2.resize(frame, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR) + frame = cv2.resize( + frame, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR + ) # Scale pose coordinates pose = pose.copy() @@ -642,15 +661,10 @@ def draw_pose_and_write( lineType=cv2.LINE_AA, ) - vwriter.write(image=frame) -def _get_metadata( - video_path: str, - cap: cv2.VideoCapture, - dlc_live: DLCLive -): +def _get_metadata(video_path: str, cap: cv2.VideoCapture, dlc_live: DLCLive): try: fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC)) except Exception: @@ -687,7 +701,9 @@ def _get_metadata( return meta -def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, timestamp): +def save_poses_to_files( + video_path, save_dir, n_individuals, bodyparts, poses, timestamp +): """ Saves the detected keypoint poses from the video to CSV and HDF5 files. @@ -708,7 +724,7 @@ def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, t ------- None """ - import pandas as pd + import pandas as pd # noqa: F401 base_filename = Path(video_path).stem save_dir = Path(save_dir) @@ -725,7 +741,8 @@ def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, t else: individuals = [f"individual_{i}" for i in range(n_individuals)] pdindex = pd.MultiIndex.from_product( - [individuals, bodyparts, ["x", "y", "likelihood"]], names=["individuals", "bodyparts", "coords"] + [individuals, bodyparts, ["x", "y", "likelihood"]], + names=["individuals", "bodyparts", "coords"], ) pose_df = pd.DataFrame(flattened_poses, columns=pdindex) @@ -733,6 +750,7 @@ def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, t pose_df.to_hdf(h5_save_path, key="df_with_missing", mode="w") pose_df.to_csv(csv_save_path, index=False) + def _create_poses_np_array(n_individuals: int, bodyparts: list, poses: list): # Create numpy array with poses: max_frame = max(p["frame"] for p in poses) @@ -745,17 +763,15 @@ def _create_poses_np_array(n_individuals: int, bodyparts: list, poses: list): if pose.ndim == 2: pose = pose[np.newaxis, :, :] padded_pose = np.full(pose_target_shape, np.nan) - slices = tuple(slice(0, min(pose.shape[i], pose_target_shape[i])) for i in range(3)) + slices = tuple( + slice(0, min(pose.shape[i], pose_target_shape[i])) for i in range(3) + ) padded_pose[slices] = pose[slices] poses_array[frame] = padded_pose return poses_array -import argparse -import os - - def main(): """Provides a command line interface to benchmark_videos function.""" parser = argparse.ArgumentParser( From bd21686a991fb47bd100602e575f8a5cda4ea06e Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 13 Feb 2026 14:07:12 +0100 Subject: [PATCH 3/9] Enable display for dlc-live-test in CI Update .github/workflows/testing.yml to run the Model Benchmark Test with --display instead of --nodisplay. This allows dlc-live-test to run tests that require a display (e.g., visual/benchmarking checks) in the CI workflow. 
--- .github/workflows/testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 28d8b6c..159ec03 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -69,7 +69,7 @@ jobs: shell: bash - name: Run Model Benchmark Test - run: uv run dlc-live-test --nodisplay + run: uv run dlc-live-test --display - name: Run DLC Live Unit Tests run: uv run pytest From a4f6573f6af0f135f04b516996760b7da36f642d Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 13 Feb 2026 14:07:34 +0100 Subject: [PATCH 4/9] Update testing.yml --- .github/workflows/testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 159ec03..62b5956 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -69,7 +69,7 @@ jobs: shell: bash - name: Run Model Benchmark Test - run: uv run dlc-live-test --display + run: uv run dlc-live-test - name: Run DLC Live Unit Tests run: uv run pytest From 0629b15ee2270abd7dafc3167655dbc31c47c67f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 13 Feb 2026 14:16:17 +0100 Subject: [PATCH 5/9] Improve backend test robustness and imports Refactor test startup and error handling for check_install and simplify a type-only import. - dlclive/benchmark.py: replace the try/except tensorflow import under TYPE_CHECKING with a direct import (type ignored) to simplify typing logic. - dlclive/check_install/check_install.py: - Defer importing export_modelzoo_model into run_pytorch_test to avoid importing heavy modules unless PyTorch test runs. - Move MODELS_FOLDER.mkdir to after temporary directory creation. - Add a --nodisplay flag and set default for --display to False so CLI can explicitly disable display. - Comment out resize parameters in test calls and remove an unnecessary model_dir.exists() assertion. - Wrap per-backend test runs in try/except, collect backend failures, allow other backends to continue, and raise an aggregated RuntimeError if all backend tests fail. These changes improve robustness when some backends fail or are unavailable and reduce unnecessary imports during initial checks. 
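For reference, the failure-aggregation logic described above boils down to the sketch below. run_all_backend_tests is a hypothetical wrapper name used only for illustration (the real logic lives inline in main()), and importing the per-backend helpers from dlclive.check_install.check_install assumes that module is importable as a package.

```python
import warnings

from dlclive.check_install.check_install import run_pytorch_test, run_tensorflow_test
from dlclive.engine import Engine
from dlclive.utils import get_available_backends


def run_all_backend_tests(video_file: str, display: bool = False) -> None:
    """Run every available backend test; fail only if all of them fail."""
    tests = {Engine.PYTORCH: run_pytorch_test, Engine.TENSORFLOW: run_tensorflow_test}
    failures: dict[Engine, Exception] = {}
    succeeded = False
    for backend in get_available_backends():
        test_fn = tests.get(backend)
        if test_fn is None:
            warnings.warn(f"Unrecognized backend {backend}, skipping...", UserWarning)
            continue
        try:
            test_fn(video_file, display=display)
            succeeded = True
        except Exception as exc:
            # Record the failure and keep testing the remaining backends.
            failures[backend] = exc
    if not succeeded and failures:
        details = "; ".join(f"{b}: {exc}" for b, exc in failures.items())
        raise RuntimeError(f"All backend tests failed. Details: {details}")
```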
--- dlclive/benchmark.py | 5 +-- dlclive/check_install/check_install.py | 53 +++++++++++++++++++------- 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/dlclive/benchmark.py b/dlclive/benchmark.py index 47b5e3b..5b6f58b 100644 --- a/dlclive/benchmark.py +++ b/dlclive/benchmark.py @@ -29,10 +29,7 @@ from dlclive.utils import decode_fourcc if TYPE_CHECKING: - try: - import tensorflow - except ImportError: - tensorflow = None + import tensorflow # type: ignore def download_benchmarking_data( diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index 1f7dc82..7b10cb2 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -16,7 +16,6 @@ from dlclive.utils import download_file from dlclive.benchmark import benchmark_videos from dlclive.engine import Engine -from dlclive.modelzoo.pytorch_model_zoo_export import export_modelzoo_model from dlclive.utils import get_available_backends MODEL_NAME = "superanimal_quadruped" @@ -31,10 +30,10 @@ } TF_MODEL_DIR = TMP_DIR / "DLC_Dog_resnet_50_iteration-0_shuffle-0" -MODELS_FOLDER.mkdir(parents=True, exist_ok=True) - def run_pytorch_test(video_file: str, display: bool = False): + from dlclive.modelzoo.pytorch_model_zoo_export import export_modelzoo_model + if Engine.PYTORCH not in get_available_backends(): raise NotImplementedError( "PyTorch backend is not available. Please ensure PyTorch is installed to run the PyTorch test." @@ -56,7 +55,7 @@ def run_pytorch_test(video_file: str, display: bool = False): model_type="pytorch", video_path=video_file, display=display, - resize=0.5, + # resize=0.5, pcutoff=0.25, pixels=1000, ) @@ -69,7 +68,6 @@ def run_tensorflow_test(video_file: str, display: bool = False): ) model_dir = TF_MODEL_DIR model_dir.mkdir(parents=True, exist_ok=True) - assert model_dir.exists(), f"Model directory {model_dir} does not exist" if Path(model_dir / SNAPSHOT_NAME).exists(): print("Model already downloaded, using cached version") else: @@ -87,7 +85,7 @@ def run_tensorflow_test(video_file: str, display: bool = False): model_type="base", video_path=video_file, display=display, - resize=0.5, + # resize=0.5, pcutoff=0.25, pixels=1000, ) @@ -102,8 +100,16 @@ def main(): parser.add_argument( "--display", action="store_true", + default=False, help="Run the test and display tracking", ) + parser.add_argument( + "--nodisplay", + action="store_false", + dest="display", + help=argparse.SUPPRESS, + ) + args = parser.parse_args() display = args.display @@ -114,6 +120,7 @@ def main(): print("\nCreating temporary directory...\n") tmp_dir = TMP_DIR tmp_dir.mkdir(mode=0o775, exist_ok=True) + MODELS_FOLDER.mkdir(parents=True, exist_ok=True) video_file = str(tmp_dir / "dog_clip.avi") @@ -131,19 +138,37 @@ def main(): # assert these things exist so we can give informative error messages assert Path(video_file).exists(), f"Missing video file {video_file}" + backend_failures = {} + any_backend_succeeded = False for backend in get_available_backends(): - if backend == Engine.PYTORCH: - print("\nRunning PyTorch test...\n") - run_pytorch_test(video_file, display=display) - elif backend == Engine.TENSORFLOW: - print("\nRunning TensorFlow test...\n") - run_tensorflow_test(video_file, display=display) - else: + try: + if backend == Engine.PYTORCH: + print("\nRunning PyTorch test...\n") + run_pytorch_test(video_file, display=display) + any_backend_succeeded = True + elif backend == Engine.TENSORFLOW: + print("\nRunning TensorFlow test...\n") + 
run_tensorflow_test(video_file, display=display) + any_backend_succeeded = True + else: + warnings.warn( + f"Unrecognized backend {backend}, skipping...", UserWarning + ) + except Exception as e: + backend_failures[backend] = e warnings.warn( - f"Unrecognized backend {backend}, skipping...", UserWarning + f"Error while running test for backend {backend}: {e}. " + "Continuing to test other available backends.", + UserWarning, ) + if not any_backend_succeeded and backend_failures: + failure_messages = "; ".join( + f"{b}: {exc}" for b, exc in backend_failures.items() + ) + raise RuntimeError(f"All backend tests failed. Details: {failure_messages}") + finally: # deleting temporary files print("\n Deleting temporary files...\n") From f7c942b05fed65ce12924d987ef363b07c146314 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 17 Feb 2026 11:36:25 +0100 Subject: [PATCH 6/9] Refactor check_install tests and model paths Rename MODELS_FOLDER to MODELS_DIR and update references (TORCH_CONFIG checkpoint and TF_MODEL_DIR) for clearer naming. Change missing-backend errors from NotImplementedError to ImportError to better reflect installation issues. Simplify main(): consolidate arg parsing, consistently create TMP_DIR and MODELS_DIR, and add backend_results tracking to report per-backend SUCCESS/ERROR statuses with a printed summary. Improve error recording for backend failures and adjust cleanup check when removing the temporary directory. --- dlclive/check_install/check_install.py | 95 +++++++++++++++----------- 1 file changed, 57 insertions(+), 38 deletions(-) diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index 7b10cb2..cc34b10 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -22,20 +22,20 @@ SNAPSHOT_NAME = "snapshot-700000.pb" TMP_DIR = Path(__file__).parent / "dlc-live-tmp" -MODELS_FOLDER = TMP_DIR / "test_models" +MODELS_DIR = TMP_DIR / "test_models" TORCH_MODEL = "resnet_50" TORCH_CONFIG = { - "checkpoint": MODELS_FOLDER / f"exported_quadruped_{TORCH_MODEL}.pt", + "checkpoint": MODELS_DIR / f"exported_quadruped_{TORCH_MODEL}.pt", "super_animal": "superanimal_quadruped", } -TF_MODEL_DIR = TMP_DIR / "DLC_Dog_resnet_50_iteration-0_shuffle-0" +TF_MODEL_DIR = MODELS_DIR / "DLC_Dog_resnet_50_iteration-0_shuffle-0" def run_pytorch_test(video_file: str, display: bool = False): from dlclive.modelzoo.pytorch_model_zoo_export import export_modelzoo_model if Engine.PYTORCH not in get_available_backends(): - raise NotImplementedError( + raise ImportError( "PyTorch backend is not available. Please ensure PyTorch is installed to run the PyTorch test." ) # Download model from the DeepLabCut Model Zoo @@ -63,7 +63,7 @@ def run_pytorch_test(video_file: str, display: bool = False): def run_tensorflow_test(video_file: str, display: bool = False): if Engine.TENSORFLOW not in get_available_backends(): - raise NotImplementedError( + raise ImportError( "TensorFlow backend is not available. Please ensure TensorFlow is installed to run the TensorFlow test." ) model_dir = TF_MODEL_DIR @@ -92,38 +92,39 @@ def run_tensorflow_test(video_file: str, display: bool = False): def main(): - tmp_dir = None - try: - parser = argparse.ArgumentParser( - description="Test DLC-Live installation by downloading and evaluating a demo DLC project!" 
- ) - parser.add_argument( - "--display", - action="store_true", - default=False, - help="Run the test and display tracking", - ) - parser.add_argument( - "--nodisplay", - action="store_false", - dest="display", - help=argparse.SUPPRESS, - ) + backend_results = {} + + parser = argparse.ArgumentParser( + description="Test DLC-Live installation by downloading and evaluating a demo DLC project!" + ) + parser.add_argument( + "--display", + action="store_true", + default=False, + help="Run the test and display tracking", + ) + parser.add_argument( + "--nodisplay", + action="store_false", + dest="display", + help=argparse.SUPPRESS, + ) - args = parser.parse_args() - display = args.display + args = parser.parse_args() + display = args.display - if not display: - print("Running without displaying video") + if not display: + print("Running without displaying video") - # make temporary directory - print("\nCreating temporary directory...\n") - tmp_dir = TMP_DIR - tmp_dir.mkdir(mode=0o775, exist_ok=True) - MODELS_FOLDER.mkdir(parents=True, exist_ok=True) + # make temporary directory + print("\nCreating temporary directory...\n") + tmp_dir = TMP_DIR + tmp_dir.mkdir(mode=0o775, exist_ok=True) + MODELS_DIR.mkdir(parents=True, exist_ok=True) - video_file = str(tmp_dir / "dog_clip.avi") + video_file = str(tmp_dir / "dog_clip.avi") + try: # download dog test video from github: # Use raw.githubusercontent.com for direct file access if not Path(video_file).exists(): @@ -147,15 +148,23 @@ def main(): print("\nRunning PyTorch test...\n") run_pytorch_test(video_file, display=display) any_backend_succeeded = True + backend_results["pytorch"] = ("SUCCESS", None) elif backend == Engine.TENSORFLOW: print("\nRunning TensorFlow test...\n") run_tensorflow_test(video_file, display=display) any_backend_succeeded = True + backend_results["tensorflow"] = ("SUCCESS", None) else: warnings.warn( f"Unrecognized backend {backend}, skipping...", UserWarning ) except Exception as e: + backend_name = ( + "pytorch" if backend == Engine.PYTORCH else + "tensorflow" if backend == Engine.TENSORFLOW else + str(backend) + ) + backend_results[backend_name] = ("ERROR", str(e)) backend_failures[backend] = e warnings.warn( f"Error while running test for backend {backend}: {e}. " @@ -163,17 +172,27 @@ def main(): UserWarning, ) - if not any_backend_succeeded and backend_failures: - failure_messages = "; ".join( - f"{b}: {exc}" for b, exc in backend_failures.items() - ) - raise RuntimeError(f"All backend tests failed. Details: {failure_messages}") + print("\n---\nBackend test summary:") + for name in ("tensorflow", "pytorch"): + status, _ = backend_results.get(name, ("SKIPPED", None)) + print(f"{name:<11} [{status}]") + print("---") + for name, (status, error) in backend_results.items(): + if status == "ERROR": + print(f"{name.capitalize()} error:\n{error}\n") + + if not any_backend_succeeded and backend_failures: + failure_messages = "; ".join( + f"{b}: {exc}" for b, exc in backend_failures.items() + ) + raise RuntimeError(f"All backend tests failed. 
Details: {failure_messages}") + finally: # deleting temporary files print("\n Deleting temporary files...\n") try: - if tmp_dir is not None and tmp_dir.exists(): + if tmp_dir.exists(): shutil.rmtree(tmp_dir) except PermissionError: warnings.warn( From 059d838cca146a3edf55249ab7f3dd9bcbf68086 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Tue, 17 Feb 2026 12:08:27 +0100 Subject: [PATCH 7/9] Update dlclive/benchmark.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- dlclive/benchmark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dlclive/benchmark.py b/dlclive/benchmark.py index 5b6f58b..ca0a60f 100644 --- a/dlclive/benchmark.py +++ b/dlclive/benchmark.py @@ -266,7 +266,7 @@ def get_system_info() -> dict: dev_type = "GPU" dev = [torch.cuda.get_device_name(torch.cuda.current_device())] else: - from cpuinfo import get_cpu_info # noqa: F401 + from cpuinfo import get_cpu_info dev_type = "CPU" dev = get_cpu_info() From 0c516b6fdde9569acfdd9e275473492119dc1c4b Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Tue, 17 Feb 2026 12:10:26 +0100 Subject: [PATCH 8/9] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- dlclive/benchmark.py | 2 +- dlclive/check_install/check_install.py | 28 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/dlclive/benchmark.py b/dlclive/benchmark.py index ca0a60f..de47548 100644 --- a/dlclive/benchmark.py +++ b/dlclive/benchmark.py @@ -721,7 +721,7 @@ def save_poses_to_files( ------- None """ - import pandas as pd # noqa: F401 + import pandas as pd base_filename = Path(video_path).stem save_dir = Path(save_dir) diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index cc34b10..7e1dedd 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -172,20 +172,20 @@ def main(): UserWarning, ) - print("\n---\nBackend test summary:") - for name in ("tensorflow", "pytorch"): - status, _ = backend_results.get(name, ("SKIPPED", None)) - print(f"{name:<11} [{status}]") - print("---") - for name, (status, error) in backend_results.items(): - if status == "ERROR": - print(f"{name.capitalize()} error:\n{error}\n") - - if not any_backend_succeeded and backend_failures: - failure_messages = "; ".join( - f"{b}: {exc}" for b, exc in backend_failures.items() - ) - raise RuntimeError(f"All backend tests failed. Details: {failure_messages}") + print("\n---\nBackend test summary:") + for name in ("tensorflow", "pytorch"): + status, _ = backend_results.get(name, ("SKIPPED", None)) + print(f"{name:<11} [{status}]") + print("---") + for name, (status, error) in backend_results.items(): + if status == "ERROR": + print(f"{name.capitalize()} error:\n{error}\n") + + if not any_backend_succeeded and backend_failures: + failure_messages = "; ".join( + f"{b}: {exc}" for b, exc in backend_failures.items() + ) + raise RuntimeError(f"All backend tests failed. 
Details: {failure_messages}") finally: From 299c4f61a7664a1f08a0f09aaedac38ad6fea036 Mon Sep 17 00:00:00 2001 From: Cyril Achard Date: Thu, 19 Feb 2026 18:50:00 +0000 Subject: [PATCH 9/9] Update dlclive/check_install/check_install.py Co-authored-by: Jaap de Ruyter van Steveninck <32810691+deruyter92@users.noreply.github.com> --- dlclive/check_install/check_install.py | 1 - 1 file changed, 1 deletion(-) diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index 7e1dedd..6f0fea7 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -199,7 +199,6 @@ def main(): f"Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!" ) - print("\nDone!\n") if __name__ == "__main__":