|
7 | 7 | # https://github.com/DeepLabCut/DeepLabCut/blob/main/AUTHORS |
8 | 8 | # |
9 | 9 | # Licensed under GNU Lesser General Public License v3.0 |
10 | | -# |
| 10 | + |
| 11 | +# NOTE DUPLICATED @C-Achard 2026-01-26: Duplication between this file |
| 12 | +# and deeplabcut/pose_estimation_pytorch/runners/dynamic_cropping.py |
| 13 | +# NOTE Testing already exists at deeplabcut/tests/pose_estimation_pytorch/runners/test_dynamic_cropper.py |
11 | 14 | """Modules to dynamically crop individuals out of videos to improve video analysis""" |
| 15 | + |
12 | 16 | from __future__ import annotations |
13 | 17 |
|
14 | 18 | import math |
15 | 19 | from dataclasses import dataclass, field |
16 | | -from typing import Optional |
17 | 20 |
|
18 | 21 | import torch |
19 | 22 | import torchvision.transforms.functional as F |
@@ -79,10 +82,7 @@ def crop(self, image: torch.Tensor) -> torch.Tensor: |
79 | 82 | height. |
80 | 83 | """ |
81 | 84 | if len(image) != 1: |
82 | | - raise RuntimeError( |
83 | | - "DynamicCropper can only be used with batch size 1 (found image " |
84 | | - f"shape: {image.shape})" |
85 | | - ) |
| 85 | + raise RuntimeError(f"DynamicCropper can only be used with batch size 1 (found image shape: {image.shape})") |
86 | 86 |
|
87 | 87 | if self._shape is None: |
88 | 88 | self._shape = image.shape[3], image.shape[2] |
@@ -114,7 +114,7 @@ def update(self, pose: torch.Tensor) -> torch.Tensor: |
114 | 114 | The pose, with coordinates updated to the full image space. |
115 | 115 | """ |
116 | 116 | if self._shape is None: |
117 | | - raise RuntimeError(f"You must call `crop` before calling `update`.") |
| 117 | + raise RuntimeError("You must call `crop` before calling `update`.") |
118 | 118 |
|
119 | 119 | # offset the pose to the original image space |
120 | 120 | offset_x, offset_y = 0, 0 |
@@ -153,9 +153,7 @@ def reset(self) -> None: |
153 | 153 | self._crop = None |
154 | 154 |
|
155 | 155 | @staticmethod |
156 | | - def build( |
157 | | - dynamic: bool, threshold: float, margin: int |
158 | | - ) -> Optional["DynamicCropper"]: |
| 156 | + def build(dynamic: bool, threshold: float, margin: int) -> DynamicCropper | None: |
159 | 157 | """Builds the DynamicCropper based on the given parameters |
160 | 158 |
|
161 | 159 | Args: |
@@ -309,10 +307,7 @@ def crop(self, image: torch.Tensor) -> torch.Tensor: |
309 | 307 | `crop` was previously called with an image of a different W or H. |
310 | 308 | """ |
311 | 309 | if len(image) != 1: |
312 | | - raise RuntimeError( |
313 | | - "DynamicCropper can only be used with batch size 1 (found image " |
314 | | - f"shape: {image.shape})" |
315 | | - ) |
| 310 | + raise RuntimeError(f"DynamicCropper can only be used with batch size 1 (found image shape: {image.shape})") |
316 | 311 |
|
317 | 312 | if self._shape is None: |
318 | 313 | self._shape = image.shape[3], image.shape[2] |
@@ -349,7 +344,7 @@ def update(self, pose: torch.Tensor) -> torch.Tensor: |
349 | 344 | The pose, with coordinates updated to the full image space. |
350 | 345 | """ |
351 | 346 | if self._shape is None: |
352 | | - raise RuntimeError(f"You must call `crop` before calling `update`.") |
| 347 | + raise RuntimeError("You must call `crop` before calling `update`.") |
353 | 348 |
|
354 | 349 | # check whether this was a patched crop |
355 | 350 | batch_size = pose.shape[0] |
@@ -399,9 +394,7 @@ def update(self, pose: torch.Tensor) -> torch.Tensor: |
399 | 394 |
|
400 | 395 | return pose |
401 | 396 |
|
402 | | - def _prepare_bounding_box( |
403 | | - self, x1: int, y1: int, x2: int, y2: int |
404 | | - ) -> tuple[int, int, int, int]: |
| 397 | + def _prepare_bounding_box(self, x1: int, y1: int, x2: int, y2: int) -> tuple[int, int, int, int]: |
405 | 398 | """Prepares the bounding box for cropping. |
406 | 399 |
|
407 | 400 | Adds a margin around the bounding box, then transforms it into the target aspect |
@@ -498,12 +491,8 @@ def generate_patches(self) -> list[tuple[int, int, int, int]]: |
498 | 491 | Returns: |
499 | 492 | A list of patch coordinates as tuples (x0, y0, x1, y1). |
500 | 493 | """ |
501 | | - patch_xs = self.split_array( |
502 | | - self._shape[0], self._patch_counts[0], self._patch_overlap |
503 | | - ) |
504 | | - patch_ys = self.split_array( |
505 | | - self._shape[1], self._patch_counts[1], self._patch_overlap |
506 | | - ) |
| 494 | + patch_xs = self.split_array(self._shape[0], self._patch_counts[0], self._patch_overlap) |
| 495 | + patch_ys = self.split_array(self._shape[1], self._patch_counts[1], self._patch_overlap) |
507 | 496 |
|
508 | 497 | patches = [] |
509 | 498 | for y0, y1 in patch_ys: |
@@ -534,7 +523,7 @@ def split_array(size: int, n: int, overlap: int) -> list[tuple[int, int]]: |
534 | 523 | segment_size = (padded_size // n) + (padded_size % n > 0) |
535 | 524 | segments = [] |
536 | 525 | end = overlap |
537 | | - for i in range(n): |
| 526 | + for _i in range(n): |
538 | 527 | start = end - overlap |
539 | 528 | end = start + segment_size |
540 | 529 | if end > size: |
|
0 commit comments