diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 28d8b6c..62b5956 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -69,7 +69,7 @@ jobs: shell: bash - name: Run Model Benchmark Test - run: uv run dlc-live-test --nodisplay + run: uv run dlc-live-test - name: Run DLC Live Unit Tests run: uv run pytest diff --git a/dlclive/benchmark.py b/dlclive/benchmark.py index 8b4a0b4..de47548 100644 --- a/dlclive/benchmark.py +++ b/dlclive/benchmark.py @@ -10,8 +10,10 @@ import sys import time import warnings +from typing import TYPE_CHECKING from pathlib import Path - +import argparse +import os import colorcet as cc import cv2 import numpy as np @@ -23,10 +25,12 @@ from dlclive import DLCLive from dlclive import VERSION -from dlclive import __file__ as dlcfile from dlclive.engine import Engine from dlclive.utils import decode_fourcc +if TYPE_CHECKING: + import tensorflow # type: ignore + def download_benchmarking_data( target_dir=".", @@ -49,17 +53,20 @@ def download_benchmarking_data( if os.path.exists(zip_path): print(f"{zip_path} already exists. Skipping download.") else: + def show_progress(count, block_size, total_size): pbar.update(block_size) print(f"Downloading the benchmarking data from {url} ...") pbar = tqdm(unit="B", total=0, position=0, desc="Downloading") - filename, _ = urllib.request.urlretrieve(url, filename=zip_path, reporthook=show_progress) + filename, _ = urllib.request.urlretrieve( + url, filename=zip_path, reporthook=show_progress + ) pbar.close() print(f"Extracting {zip_path} to {target_dir} ...") - with zipfile.ZipFile(zip_path, 'r') as zip_ref: + with zipfile.ZipFile(zip_path, "r") as zip_ref: zip_ref.extractall(target_dir) @@ -81,6 +88,7 @@ def benchmark_videos( cmap="bmy", save_poses=False, save_video=False, + single_animal=False, ): """Analyze videos using DeepLabCut-live exported models. 
Analyze multiple videos and/or multiple options for the size of the video @@ -168,7 +176,7 @@ def benchmark_videos( im_size_out = [] for i in range(len(resize)): - print(f"\nRun {i+1} / {len(resize)}\n") + print(f"\nRun {i + 1} / {len(resize)}\n") this_inf_times, this_im_size, meta = benchmark( model_path=model_path, @@ -188,6 +196,7 @@ def benchmark_videos( save_poses=save_poses, save_video=save_video, save_dir=output, + single_animal=single_animal, ) inf_times.append(this_inf_times) @@ -275,9 +284,7 @@ def get_system_info() -> dict: } -def save_inf_times( - sys_info, inf_times, im_size, model=None, meta=None, output=None -): +def save_inf_times(sys_info, inf_times, im_size, model=None, meta=None, output=None): """Save inference time data collected using :function:`benchmark` with system information to a pickle file. This is primarily used through :function:`benchmark_videos` @@ -346,6 +353,7 @@ def save_inf_times( return True + def benchmark( model_path: str, model_type: str, @@ -357,8 +365,8 @@ def benchmark( single_animal: bool = True, cropping: list[int] | None = None, dynamic: tuple[bool, float, int] = (False, 0.5, 10), - n_frames: int =1000, - print_rate: bool=False, + n_frames: int = 1000, + print_rate: bool = False, precision: str = "FP32", display: bool = True, pcutoff: float = 0.5, @@ -434,7 +442,10 @@ def benchmark( if not cap.isOpened(): print(f"Error: Could not open video file {video_path}") return - im_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))) + im_size = ( + int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), + ) if pixels is not None: resize = np.sqrt(pixels / (im_size[0] * im_size[1])) @@ -492,9 +503,7 @@ def benchmark( total_n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT) n_frames = int( - n_frames - if (n_frames > 0) and n_frames < total_n_frames - else total_n_frames + n_frames if (n_frames > 0) and n_frames < total_n_frames else total_n_frames ) iterator = 
range(n_frames) if print_rate or display else tqdm(range(n_frames)) for _ in iterator: @@ -510,7 +519,7 @@ def benchmark( start_time = time.perf_counter() if frame_index == 0: - pose = dlc_live.init_inference(frame) # Loads model + pose = dlc_live.init_inference(frame) # Loads model else: pose = dlc_live.get_pose(frame) @@ -519,7 +528,9 @@ def benchmark( times.append(inf_time) if print_rate: - print("Inference rate = {:.3f} FPS".format(1 / inf_time), end="\r", flush=True) + print( + "Inference rate = {:.3f} FPS".format(1 / inf_time), end="\r", flush=True + ) if save_video: draw_pose_and_write( @@ -531,19 +542,17 @@ def benchmark( pcutoff=pcutoff, display_radius=display_radius, draw_keypoint_names=draw_keypoint_names, - vwriter=vwriter + vwriter=vwriter, ) frame_index += 1 if print_rate: - print("Mean inference rate: {:.3f} FPS".format(np.mean(1 / np.array(times)[1:]))) + print( + "Mean inference rate: {:.3f} FPS".format(np.mean(1 / np.array(times)[1:])) + ) - metadata = _get_metadata( - video_path=video_path, - cap=cap, - dlc_live=dlc_live - ) + metadata = _get_metadata(video_path=video_path, cap=cap, dlc_live=dlc_live) cap.release() @@ -558,19 +567,21 @@ def benchmark( else: individuals = [] n_individuals = len(individuals) or 1 - save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, timestamp=timestamp) + save_poses_to_files( + video_path, save_dir, n_individuals, bodyparts, poses, timestamp=timestamp + ) return times, im_size, metadata def setup_video_writer( - video_path:str, - save_dir:str, - timestamp:str, - num_keypoints:int, - cmap:str, - fps:float, - frame_size:tuple[int, int], + video_path: str, + save_dir: str, + timestamp: str, + num_keypoints: int, + cmap: str, + fps: float, + frame_size: tuple[int, int], ): # Set colors and convert to RGB cmap_colors = getattr(cc, cmap) @@ -582,7 +593,9 @@ def setup_video_writer( # Define output video path video_path = Path(video_path) video_name = video_path.stem # filename without extension - 
output_video_path = Path(save_dir) / f"{video_name}_DLCLIVE_LABELLED_{timestamp}.mp4" + output_video_path = ( + Path(save_dir) / f"{video_name}_DLCLIVE_LABELLED_{timestamp}.mp4" + ) # Get video writer setup fourcc = cv2.VideoWriter_fourcc(*"mp4v") @@ -595,6 +608,7 @@ return colors, vwriter + def draw_pose_and_write( frame: np.ndarray, pose: np.ndarray, @@ -611,7 +625,9 @@ if resize is not None and resize != 1.0: # Resize the frame - frame = cv2.resize(frame, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR) + frame = cv2.resize( + frame, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR + ) # Scale pose coordinates pose = pose.copy() @@ -642,15 +658,11 @@ lineType=cv2.LINE_AA, ) vwriter.write(image=frame) -def _get_metadata( - video_path: str, - cap: cv2.VideoCapture, - dlc_live: DLCLive -): +def _get_metadata(video_path: str, cap: cv2.VideoCapture, dlc_live: DLCLive): try: fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC)) except Exception: @@ -687,7 +698,9 @@ return meta -def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, timestamp): +def save_poses_to_files( + video_path, save_dir, n_individuals, bodyparts, poses, timestamp +): """ Saves the detected keypoint poses from the video to CSV and HDF5 files. 
@@ -725,7 +738,8 @@ def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, t else: individuals = [f"individual_{i}" for i in range(n_individuals)] pdindex = pd.MultiIndex.from_product( - [individuals, bodyparts, ["x", "y", "likelihood"]], names=["individuals", "bodyparts", "coords"] + [individuals, bodyparts, ["x", "y", "likelihood"]], + names=["individuals", "bodyparts", "coords"], ) pose_df = pd.DataFrame(flattened_poses, columns=pdindex) @@ -733,6 +747,7 @@ def save_poses_to_files(video_path, save_dir, n_individuals, bodyparts, poses, t pose_df.to_hdf(h5_save_path, key="df_with_missing", mode="w") pose_df.to_csv(csv_save_path, index=False) + def _create_poses_np_array(n_individuals: int, bodyparts: list, poses: list): # Create numpy array with poses: max_frame = max(p["frame"] for p in poses) @@ -745,17 +760,15 @@ def _create_poses_np_array(n_individuals: int, bodyparts: list, poses: list): if pose.ndim == 2: pose = pose[np.newaxis, :, :] padded_pose = np.full(pose_target_shape, np.nan) - slices = tuple(slice(0, min(pose.shape[i], pose_target_shape[i])) for i in range(3)) + slices = tuple( + slice(0, min(pose.shape[i], pose_target_shape[i])) for i in range(3) + ) padded_pose[slices] = pose[slices] poses_array[frame] = padded_pose return poses_array -import argparse -import os - - def main(): """Provides a command line interface to benchmark_videos function.""" parser = argparse.ArgumentParser( diff --git a/dlclive/check_install/check_install.py b/dlclive/check_install/check_install.py index ae3e569..6f0fea7 100755 --- a/dlclive/check_install/check_install.py +++ b/dlclive/check_install/check_install.py @@ -8,11 +8,11 @@ import argparse import shutil import warnings +import urllib.error from pathlib import Path from dlclibrary.dlcmodelzoo.modelzoo_download import download_huggingface_model -import dlclive from dlclive.utils import download_file from dlclive.benchmark import benchmark_videos from dlclive.engine import Engine @@ -20,89 
+20,194 @@ MODEL_NAME = "superanimal_quadruped" SNAPSHOT_NAME = "snapshot-700000.pb" +TMP_DIR = Path(__file__).parent / "dlc-live-tmp" + +MODELS_DIR = TMP_DIR / "test_models" +TORCH_MODEL = "resnet_50" +TORCH_CONFIG = { + "checkpoint": MODELS_DIR / f"exported_quadruped_{TORCH_MODEL}.pt", + "super_animal": "superanimal_quadruped", +} +TF_MODEL_DIR = MODELS_DIR / "DLC_Dog_resnet_50_iteration-0_shuffle-0" + + +def run_pytorch_test(video_file: str, display: bool = False): + from dlclive.modelzoo.pytorch_model_zoo_export import export_modelzoo_model + + if Engine.PYTORCH not in get_available_backends(): + raise ImportError( + "PyTorch backend is not available. Please ensure PyTorch is installed to run the PyTorch test." + ) + # Download model from the DeepLabCut Model Zoo + export_modelzoo_model( + export_path=TORCH_CONFIG["checkpoint"], + super_animal=TORCH_CONFIG["super_animal"], + model_name=TORCH_MODEL, + ) + assert TORCH_CONFIG["checkpoint"].exists(), ( + f"Failed to export {TORCH_CONFIG['super_animal']} model" + ) + assert TORCH_CONFIG["checkpoint"].stat().st_size > 0, ( + f"Exported {TORCH_CONFIG['super_animal']} model is empty" + ) + benchmark_videos( + model_path=str(TORCH_CONFIG["checkpoint"]), + model_type="pytorch", + video_path=video_file, + display=display, + # resize=0.5, + pcutoff=0.25, + pixels=1000, + ) + + +def run_tensorflow_test(video_file: str, display: bool = False): + if Engine.TENSORFLOW not in get_available_backends(): + raise ImportError( + "TensorFlow backend is not available. Please ensure TensorFlow is installed to run the TensorFlow test." + ) + model_dir = TF_MODEL_DIR + model_dir.mkdir(parents=True, exist_ok=True) + if Path(model_dir / SNAPSHOT_NAME).exists(): + print("Model already downloaded, using cached version") + else: + print( + "Downloading superanimal_quadruped model from the DeepLabCut Model Zoo..." 
+ ) + download_huggingface_model(MODEL_NAME, str(model_dir)) + + assert Path(model_dir / SNAPSHOT_NAME).exists(), ( + f"Missing model file {model_dir / SNAPSHOT_NAME}" + ) + + benchmark_videos( + model_path=str(model_dir), + model_type="base", + video_path=video_file, + display=display, + # resize=0.5, + pcutoff=0.25, + pixels=1000, + ) def main(): + backend_results = {} + parser = argparse.ArgumentParser( description="Test DLC-Live installation by downloading and evaluating a demo DLC project!" ) + parser.add_argument( + "--display", + action="store_true", + default=False, + help="Run the test and display tracking", + ) parser.add_argument( "--nodisplay", action="store_false", - help="Run the test without displaying tracking", + dest="display", + help=argparse.SUPPRESS, ) + args = parser.parse_args() - display = args.nodisplay + display = args.display if not display: print("Running without displaying video") # make temporary directory print("\nCreating temporary directory...\n") - tmp_dir = Path(dlclive.__file__).parent / "check_install" / "dlc-live-tmp" + tmp_dir = TMP_DIR tmp_dir.mkdir(mode=0o775, exist_ok=True) + MODELS_DIR.mkdir(parents=True, exist_ok=True) video_file = str(tmp_dir / "dog_clip.avi") - model_dir = tmp_dir / "DLC_Dog_resnet_50_iteration-0_shuffle-0" - - # download dog test video from github: - # Use raw.githubusercontent.com for direct file access - if not Path(video_file).exists(): - print(f"Downloading Video to {video_file}") - url_link = "https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-live/master/check_install/dog_clip.avi" - try: - download_file(url_link, video_file) - except (urllib.error.URLError, IOError) as e: - raise RuntimeError(f"Failed to download video file: {e}") from e - else: - print(f"Video file already exists at {video_file}, skipping download.") - - # download model from the DeepLabCut Model Zoo - if Path(model_dir / SNAPSHOT_NAME).exists(): - print("Model already downloaded, using cached version") - else: - 
print("Downloading superanimal_quadruped model from the DeepLabCut Model Zoo...") - download_huggingface_model(MODEL_NAME, model_dir) - - # assert these things exist so we can give informative error messages - assert Path(video_file).exists(), f"Missing video file {video_file}" - assert Path( - model_dir / SNAPSHOT_NAME - ).exists(), f"Missing model file {model_dir / SNAPSHOT_NAME}" - - # run benchmark videos - print("\n Running inference...\n") - benchmark_videos( - model_path=str(model_dir), - model_type="base" if Engine.from_model_path(model_dir) == Engine.TENSORFLOW else "pytorch", - video_path=video_file, - display=display, - resize=0.5, - pcutoff=0.25 - ) - # deleting temporary files - print("\n Deleting temporary files...\n") try: - shutil.rmtree(tmp_dir) - except PermissionError: - warnings.warn( - f"Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!" - ) + # download dog test video from github: + # Use raw.githubusercontent.com for direct file access + if not Path(video_file).exists(): + print(f"Downloading Video to {video_file}") + url_link = "https://raw.githubusercontent.com/DeepLabCut/DeepLabCut-live/master/check_install/dog_clip.avi" + try: + download_file(url_link, video_file) + except (urllib.error.URLError, IOError) as e: + raise RuntimeError(f"Failed to download video file: {e}") from e + else: + print(f"Video file already exists at {video_file}, skipping download.") + + # assert these things exist so we can give informative error messages + assert Path(video_file).exists(), f"Missing video file {video_file}" + backend_failures = {} + any_backend_succeeded = False + + for backend in get_available_backends(): + try: + if backend == Engine.PYTORCH: + print("\nRunning PyTorch test...\n") + run_pytorch_test(video_file, display=display) + any_backend_succeeded = True + backend_results["pytorch"] = ("SUCCESS", None) + elif backend == Engine.TENSORFLOW: + print("\nRunning 
TensorFlow test...\n") + run_tensorflow_test(video_file, display=display) + any_backend_succeeded = True + backend_results["tensorflow"] = ("SUCCESS", None) + else: + warnings.warn( + f"Unrecognized backend {backend}, skipping...", UserWarning + ) + except Exception as e: + backend_name = ( + "pytorch" if backend == Engine.PYTORCH else + "tensorflow" if backend == Engine.TENSORFLOW else + str(backend) + ) + backend_results[backend_name] = ("ERROR", str(e)) + backend_failures[backend] = e + warnings.warn( + f"Error while running test for backend {backend}: {e}. " + "Continuing to test other available backends.", + UserWarning, + ) + + print("\n---\nBackend test summary:") + for name in ("tensorflow", "pytorch"): + status, _ = backend_results.get(name, ("SKIPPED", None)) + print(f"{name:<11} [{status}]") + print("---") + for name, (status, error) in backend_results.items(): + if status == "ERROR": + print(f"{name.capitalize()} error:\n{error}\n") + + if not any_backend_succeeded and backend_failures: + failure_messages = "; ".join( + f"{b}: {exc}" for b, exc in backend_failures.items() + ) + raise RuntimeError(f"All backend tests failed. Details: {failure_messages}") + + + finally: + # deleting temporary files + print("\n Deleting temporary files...\n") + try: + if tmp_dir.exists(): + shutil.rmtree(tmp_dir) + except PermissionError: + warnings.warn( + f"Could not delete temporary directory {str(tmp_dir)} due to a permissions error, but otherwise dlc-live seems to be working fine!" 
+ ) - print("\nDone!\n") if __name__ == "__main__": - # Get available backends (emits a warning if neither TensorFlow nor PyTorch is installed) available_backends: list[Engine] = get_available_backends() print(f"Available backends: {[b.value for b in available_backends]}") - - # TODO: JR add support for PyTorch in check_install.py (requires some exported pytorch model to be downloaded) - if not Engine.TENSORFLOW in available_backends: + if len(available_backends) == 0: raise NotImplementedError( - "TensorFlow is not installed. Currently check_install.py only supports testing the TensorFlow installation." + "Neither TensorFlow nor PyTorch is installed. Please install at least one of these frameworks to run the installation test." ) main()