# `new_lrs[:5] = lr_warm` raises TypeError: can only assign an iterable

new_lrs[:5] = lr_warm
TypeError: can only assign an iterable


Explanation:

In Python, `a_list[0:3] = 'xxx'` does not raise an error: the elements at indices 0, 1 and 2 are replaced by the characters of `'xxx'`. This works because a string is itself a sequence of characters, so it can be iterated.
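A quick check in the interpreter illustrates this (the list `a` is just a throwaway example):

```python
a = [10, 20, 30, 40]
a[0:3] = 'xyz'   # a string is iterable, so its characters fill the slice
print(a)         # ['x', 'y', 'z', 40]
```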

`a_list[0:2] = 1`, on the other hand, raises `TypeError: can only assign an iterable`.

This is because the integer `1` is a plain value with no iteration capability. To assign a single value to a slice, write `a_list[0:2] = (1,)` instead.

The right-hand side of a slice assignment must be an iterable type, so a bare integer fails, but a one-element list such as `[1]` works.
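A minimal sketch of the failing case and both workarounds (the list and values are made up for illustration):

```python
a = [10, 20, 30, 40]

# a[0:2] = 1     # TypeError: can only assign an iterable

a[0:2] = (1,)    # a one-element tuple is iterable; note the slice shrinks
print(a)         # [1, 30, 40]

a[0:1] = [1]     # a one-element list works just as well
print(a)         # [1, 30, 40]
```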

lr = [0.0001, 0.00012, 0.00013]
new_lrs = [0.001, 0.0009, 0.0008, 0.0007, 0.0006]
new_lrs[:3] = lr
new_lrs
Out[5]: [0.0001, 0.00012, 0.00013, 0.0007, 0.0006]
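The same rule explains the original error: `lr_warm` was a single float, not an iterable, so assigning it to a slice fails. Wrapping it in a list fixes it (the values here are hypothetical, only mirroring the situation above):

```python
lr_warm = 0.0005
new_lrs = [0.001, 0.0009, 0.0008, 0.0007, 0.0006]

# new_lrs[:5] = lr_warm      # TypeError: can only assign an iterable

new_lrs[:5] = [lr_warm] * 5  # a list of five floats is iterable
print(new_lrs)               # [0.0005, 0.0005, 0.0005, 0.0005, 0.0005]
```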


I ran into this error while adding a warmup phase to the learning-rate schedule. The complete code is as follows:

import torch
import math
from torch.optim.lr_scheduler import _LRScheduler

class CosineAnnealingLR_with_Restart(_LRScheduler):
"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:\eta_{max} is set to the initial lr and
:math:T_{cur} is the number of epochs since the last restart in SGDR:

.. math::

\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))

When last_epoch=-1, sets initial lr as lr.

It has been proposed in
SGDR: Stochastic Gradient Descent with Warm Restarts_. The original pytorch
implementation only implements the cosine annealing part of SGDR,
I added my own implementation of the restarts part.

Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
T_mult (float): Increase T_max by a factor of T_mult
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
model (pytorch model): The model to save.
out_dir (str): Directory to save snapshots
take_snapshot (bool): Whether to save snapshots at every restart

.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""

    def __init__(self, optimizer, T_max, T_mult, model, out_dir, take_snapshot, eta_min=0, last_epoch=-1):
        self.T_max = T_max
        self.T_mult = T_mult
        self.Te = self.T_max
        self.eta_min = eta_min
        self.current_epoch = last_epoch

        self.model = model
        self.out_dir = out_dir
        self.take_snapshot = take_snapshot

        self.lr_history = []

        super(CosineAnnealingLR_with_Restart, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # `cfg` is a global training-config dict defined elsewhere in the project.
        if self.current_epoch < 5:
            # Exponential warmup from warmup_start_lr up to the target lr.
            warm_factor = (cfg['train']['lr'] / cfg['train']['warmup_start_lr']) ** (1 / cfg['train']['warmup_epochs'])
            lr = cfg['train']['warmup_start_lr'] * warm_factor ** self.current_epoch
            new_lrs = [lr for _ in self.base_lrs]  # one entry per parameter group
        else:
            # Plain cosine annealing between base_lr and eta_min.
            new_lrs = [self.eta_min + (base_lr - self.eta_min) *
                       (1 + math.cos(math.pi * self.current_epoch / self.Te)) / 2
                       for base_lr in self.base_lrs]

        # new_lrs[:5] = lr_warm   # this line raised the TypeError: lr_warm is a float, not an iterable
        # self.lr_history.append(new_lrs)
        # print('new_lrs', new_lrs, len(new_lrs))
        return new_lrs

    def step(self, epoch=None):
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        self.current_epoch += 1

        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

        ## restart
        if self.current_epoch == self.Te:
            print("restart at epoch {:03d}".format(self.last_epoch + 1))

            if self.take_snapshot:
                torch.save({
                    'epoch': self.T_max,
                    'state_dict': self.model.state_dict()
                }, self.out_dir + "Weight/" + 'snapshot_e_{:03d}.pth.tar'.format(self.T_max))

            ## reset epochs since the last restart
            self.current_epoch = 0

            ## set the next restart target
            self.Te = int(self.Te * self.T_mult)
            self.T_max = self.T_max + self.Te
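For completeness, here is a minimal usage sketch. It assumes `cfg` is a plain dict holding the training hyperparameters that `get_lr` reads; the model, optimizer and numbers are made up for illustration:

```python
import torch

# Hypothetical training config read by get_lr() above.
cfg = {'train': {'lr': 0.001, 'warmup_start_lr': 0.0001, 'warmup_epochs': 5}}

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=cfg['train']['lr'])

scheduler = CosineAnnealingLR_with_Restart(
    optimizer, T_max=10, T_mult=2,
    model=model, out_dir='./', take_snapshot=False,
    eta_min=1e-6)

for epoch in range(20):
    # ... one epoch of training would go here ...
    scheduler.step()
    print(epoch, [group['lr'] for group in optimizer.param_groups])
```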