# optimizer.pyi -- type stubs for the torch.optim base Optimizer module.
  1. from typing import Iterable, Union, Callable, Optional, List, Dict, Any
  2. from .. import Tensor
  3. from torch.utils.hooks import RemovableHandle
  4. _params_t = Union[Iterable[Tensor], Iterable[Dict[str, Any]]]
  5. def register_optimizer_step_pre_hook(hook: Callable[..., None]) -> RemovableHandle: ...
  6. def register_optimizer_step_post_hook(hook: Callable[..., None]) -> RemovableHandle: ...
  7. class Optimizer:
  8. defaults: Dict[str, Any]
  9. state: Dict[str, Any]
  10. param_groups: List[Dict[str, Any]]
  11. def __init__(self, params: _params_t, defaults: Dict[str, Any]) -> None: ...
  12. def __setstate__(self, state: Dict[str, Any]) -> None: ...
  13. def register_step_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle: ...
  14. def register_step_post_hook(self, hook: Callable[..., None]) -> RemovableHandle: ...
  15. def state_dict(self) -> Dict[str, Any]: ...
  16. def load_state_dict(self, state_dict: Dict[str, Any]) -> None: ...
  17. def zero_grad(self, set_to_none: bool=...) -> None: ...
  18. def step(self, closure: Optional[Callable[[], float]]=...) -> Optional[float]: ...
  19. def add_param_group(self, param_group: Dict[str, Any]) -> None: ...