import math
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""RAdam optimizer, a theoretically sound variant of Adam.
Source: `LiyuanLucasLiu/RAdam <https://github.com/LiyuanLucasLiu/RAdam/blob/master/radam/radam.py>`_
Under Apache License 2.0
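Args:
params (iterable): Iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): Learning rate. Defaults to 1e-3.
betas (Tuple[float, float], optional): Coefficients for the running averages of the gradient and its square. Defaults to (0.9, 0.999).
eps (float, optional): Term added to the denominator for numerical stability. Defaults to 1e-8.
weight_decay (float, optional): Weight decay factor. Defaults to 0.
degenerated_to_sgd (bool, optional): Fall back to a bias-corrected SGD update when the rectification term is undefined. Defaults to True.
Example (minimal usage sketch; the model and data are placeholders):
>>> model = torch.nn.Linear(10, 1)
>>> optimizer = RAdam(model.parameters(), lr=1e-3)
>>> optimizer.zero_grad()
>>> loss = model(torch.rand(10))
>>> loss.backward()
>>> optimizer.step()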
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
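# 'buffer' caches [step, N_sma, step_size] keyed by step % 10, so the
# rectification term is recomputed at most once per step for each group;
# groups that override betas get their own cache above.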
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
buffer=[[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
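"""Performs a single RAdam optimization step.
"""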
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
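# exponential moving averages of the squared gradient (second moment)
# and of the gradient (first moment), as in Adam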
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
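# N_sma_max is rho_inf and N_sma is rho_t from the RAdam paper: the
# (approximate) length of the SMA used to rectify the adaptive learning rate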
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * \
state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
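# when N_sma > 4 the variance of the adaptive learning rate is tractable:
# step_size is the rectification term r_t times the bias correction
# 1 / (1 - beta1 ** step); otherwise fall back to bias-corrected SGD, or
# mark the update as skipped with step_size = -1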
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) /
N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(
exp_avg, denom, value=-step_size * group['lr'])
p.data.copy_(p_data_fp32)
elif step_size > 0:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
p.data.copy_(p_data_fp32)
return loss
class Lookahead(Optimizer):
'''Lookahead Wrapper
* Code: `lonePatient/lookahead_pytorch <https://github.com/lonePatient/lookahead_pytorch/blob/master/optimizer.py>`_
* Paper: `Lookahead Optimizer <https://arxiv.org/abs/1907.08610>`_
Works best with `LookaheadCallback` or `LookaheadModelCheckpoint`.
Args:
optimizer (Optimizer): The inner optimizer.
alpha (float, optional): The linear interpolation factor. 1.0 recovers the inner optimizer. Defaults to 0.5.
k (int, optional): The number of lookahead steps. Defaults to 6.
pullback_momentum (str, optional): How to treat the inner optimizer's momentum buffer on the interpolation update; one of "none", "reset", or "pullback". Defaults to "none".
.. note::
Currently `pullback_momentum` only supports SGD optimizers with momentum.
Raises:
ValueError: Invalid slow update rate or invalid lookahead steps
Example:
>>> model = torch.nn.Linear(10, 1)
>>> optimizer = Lookahead(
... torch.optim.SGD(model.parameters(), momentum=0.9, lr=0.1),
... alpha=0.5, k=6, pullback_momentum="pullback")
...
>>> for _ in range(10):
... optimizer.zero_grad()
... loss = model(torch.rand(10))
... loss.backward()
... optimizer.step()
...
'''
def __init__(self, optimizer: Optimizer, alpha: float = 0.5, k: int = 6, pullback_momentum: str = "none"):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
self.optimizer = optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
self.step_counter = 0
assert pullback_momentum in ["reset", "pullback", "none"]
self.pullback_momentum = pullback_momentum
self.state = defaultdict(dict)
self.defaults = {}
# Cache the current parameters as the slow weights
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['cached_params'] = torch.zeros_like(p.data)
param_state['cached_params'].copy_(p.data)
def __getstate__(self):
return {
'state': self.state,
'optimizer': self.optimizer,
'alpha': self.alpha,
'step_counter': self.step_counter,
'k': self.k,
'pullback_momentum': self.pullback_momentum
}
def zero_grad(self):
self.optimizer.zero_grad()
def state_dict(self):
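# delegates to the inner optimizer; the cached slow weights kept in
# self.state are not included in the returned state dict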
return self.optimizer.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def _backup_and_load_cache(self):
"""Useful for performing evaluation on the slow weights (which typically generalize better)
"""
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['backup_params'] = torch.zeros_like(p.data)
param_state['backup_params'].copy_(p.data)
p.data.copy_(param_state['cached_params'])
def _clear_and_load_backup(self):
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.copy_(param_state['backup_params'])
del param_state['backup_params']
def step(self, closure=None):
"""Performs a single Lookahead optimization step.
"""
loss = self.optimizer.step(closure)
self.step_counter += 1
if self.step_counter >= self.k:
self.step_counter = 0
# Lookahead and cache the current optimizer parameters
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.mul_(self.alpha).add_(
param_state['cached_params'],
alpha=1.0 - self.alpha
)  # slow-weight update: phi <- alpha * theta + (1 - alpha) * phi
param_state['cached_params'].copy_(p.data)
if self.pullback_momentum == "pullback":
if "cached_mom" in param_state:
internal_momentum = self.optimizer.state[p]["momentum_buffer"]
self.optimizer.state[p]["momentum_buffer"] = internal_momentum.mul_(self.alpha).add_(
1.0 - self.alpha, param_state["cached_mom"])
param_state["cached_mom"] = self.optimizer.state[p]["momentum_buffer"]
elif self.pullback_momentum == "reset":
self.optimizer.state[p]["momentum_buffer"] = torch.zeros_like(
p.data)
return loss