from torch import Tensor
from torch.optim import Optimizer
from torch.optim.optimizer import ParamsT
from dataclasses import dataclass
from typing import Any, Dict, List, Type, Callable, Optional


@dataclass
class OptimizerSpec:
    """Spec for creating an optimizer that is part of a `ChainedOptimizer`."""

    class_type: Type[Optimizer]
    init_args: Dict[str, Any]
    # Predicate deciding which parameters this optimizer handles; None accepts
    # every parameter, making the spec a catch-all.
    param_filter: Optional[Callable[[Tensor], bool]] = None


class ChainedOptimizer(Optimizer):
    """
    A wrapper around multiple optimizers that chains them together.
    Each parameter is assigned to the first optimizer (in constructor order)
    whose `param_filter` accepts it; a `param_filter` of None accepts every
    parameter. If no optimizer accepts a parameter, a ValueError is raised.
    """

    def __init__(
        self,
        params: ParamsT,
        optimizer_specs: List[OptimizerSpec],
        lr: float,
        weight_decay: float = 0.0,
        optimizer_selection_callback: Optional[Callable[[Tensor, int], None]] = None,
        **common_kwargs,
    ):
        self.optimizer_specs = optimizer_specs
        self.optimizer_selection_callback = optimizer_selection_callback
        # `self.optimizers` must exist before the base constructor runs, because
        # `Optimizer.__init__` calls the overridden `add_param_group`, which
        # returns early while this list is still empty.
        self.optimizers: List[Optimizer] = []
        defaults = dict(lr=lr, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # Split the params across the optimizers: each parameter goes to the
        # first spec whose filter accepts it.
        params_for_optimizers = [[] for _ in optimizer_specs]
        for param_group in self.param_groups:
            params = param_group["params"]
            indices = param_group["optimizer_and_param_group_indices"] = set()
            for param in params:
                assert isinstance(param, Tensor), f"Expected a Tensor, got {type(param)}"
                for index, spec in enumerate(optimizer_specs):
                    if spec.param_filter is None or spec.param_filter(param):
                        if self.optimizer_selection_callback is not None:
                            self.optimizer_selection_callback(param, index)
                        params_for_optimizers[index].append(param)
                        indices.add((index, 0))
                        break
                else:
                    raise ValueError("No valid optimizer found for the given parameter")

        # Initialize the optimizers; per-spec `init_args` override `common_kwargs`
        # and the shared defaults.
        for spec, selected_params in zip(optimizer_specs, params_for_optimizers):
            optimizer_args = {
                "lr": lr,
                "weight_decay": weight_decay,
            }
            optimizer_args.update(common_kwargs)
            optimizer_args.update(spec.init_args)
            optimizer = spec.class_type(selected_params, **optimizer_args)
            self.optimizers.append(optimizer)

    def state_dict(self) -> Dict[str, Any]:
        return {
            "optimizers": [opt.state_dict() for opt in self.optimizers],
            **super().state_dict(),
        }

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        state_dict = dict(state_dict)  # shallow copy so the caller's dict is not mutated
        optimizer_states = state_dict.pop("optimizers")
        super().load_state_dict(state_dict)
        for optimizer, optimizer_state in zip(self.optimizers, optimizer_states):
            optimizer.load_state_dict(optimizer_state)

    def zero_grad(self, set_to_none: bool = True) -> None:
        for opt in self.optimizers:
            opt.zero_grad(set_to_none=set_to_none)

    def _copy_lr_to_optimizers(self) -> None:
        # Propagate learning rates from the wrapper's param groups (e.g. as set
        # by an LR scheduler attached to this optimizer) to the underlying ones.
        for param_group in self.param_groups:
            indices = param_group["optimizer_and_param_group_indices"]
            for optimizer_idx, param_group_idx in indices:
                optimizer = self.optimizers[optimizer_idx]
                optimizer.param_groups[param_group_idx]["lr"] = param_group["lr"]

    def step(self, closure=None) -> None:
        self._copy_lr_to_optimizers()
        # Note: the closure, if given, is passed to (and re-evaluated by) every
        # underlying optimizer.
        for opt in self.optimizers:
            opt.step(closure)

    def add_param_group(self, param_group: Dict[str, Any]) -> None:
        super().add_param_group(param_group)

        # If the underlying optimizers have not been created yet (i.e. we are
        # still inside __init__), skip splitting; __init__ handles it.
        if not self.optimizers:
            return

        # Split the params for each optimizer
        params_for_optimizers = [[] for _ in self.optimizer_specs]
        params = param_group["params"]
        indices = param_group["optimizer_and_param_group_indices"] = set()
        for param in params:
            assert isinstance(param, Tensor), f"Expected a Tensor, got {type(param)}"
            found_optimizer = False
            for index, spec in enumerate(self.optimizer_specs):
                if spec.param_filter is None or spec.param_filter(param):
                    if self.optimizer_selection_callback is not None:
                        self.optimizer_selection_callback(param, index)
                    params_for_optimizers[index].append(param)
                    # The new group will be appended at the end of this
                    # optimizer's current param_groups list.
                    indices.add((index, len(self.optimizers[index].param_groups)))
                    found_optimizer = True
                    break
            if not found_optimizer:
                raise ValueError("No valid optimizer found for the given parameter group")

        # Add the selected param group to the optimizers
        for optimizer, selected_params in zip(self.optimizers, params_for_optimizers):
            if selected_params:
                optimizer.add_param_group({"params": selected_params})
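

# A minimal usage sketch (illustrative): parameters with two or more dimensions
# (weight matrices) are routed to SGD, and everything else falls through to an
# AdamW catch-all. The toy model, optimizer choices, and hyperparameters are
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    import torch

    model = torch.nn.Sequential(
        torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 1)
    )
    specs = [
        OptimizerSpec(
            class_type=torch.optim.SGD,
            init_args={"momentum": 0.9},
            param_filter=lambda p: p.ndim >= 2,  # weight matrices
        ),
        OptimizerSpec(
            class_type=torch.optim.AdamW,
            init_args={"betas": (0.9, 0.95)},
            param_filter=None,  # catch-all for the remaining parameters (biases)
        ),
    ]
    optimizer = ChainedOptimizer(model.parameters(), specs, lr=1e-3, weight_decay=0.01)
    # An LR scheduler attached to the wrapper works as usual: the updated learning
    # rate is copied to the underlying optimizers on the next `step()` call.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()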