Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
`min(lr)`.
finetune_fraction (float): fraction of steps used for the fine tuning.
Must be between 0 and 1.
Returns:
A list of configured :class:`~poutyne.framework.callbacks.policies.Phase` instances.
References:
`Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates
<https://arxiv.org/abs/1708.07120>`_
"""
steps_annealing = int(steps * finetune_fraction)
steps_up = (steps - steps_annealing) // 2
steps_down = steps - steps_annealing - steps_up
return [
Phase(
lr=linspace(lr[0], lr[1], steps_up),
momentum=linspace(momentum[0], momentum[1], steps_up),
),
Phase(
lr=linspace(lr[1], lr[0], steps_down),
momentum=linspace(momentum[1], momentum[0], steps_down),
),
Phase(
lr=linspace(lr[0], finetune_lr, steps_annealing),
momentum=linspace(momentum[0], momentum[0], steps_annealing),
),
def setUp(self):
    """Build the fixture policy: two constant two-step phases (lr held at 1, then at 0)."""
    num_steps = 2
    constant_one = Phase(lr=linspace(1, 1, num_steps))
    constant_zero = Phase(lr=linspace(0, 0, num_steps))
    self.policy = OptimizerPolicy([constant_one, constant_zero])
def test_phase_with_two_parameters(self):
    """A phase configured with both lr and momentum yields both keys on every step,
    each within its configured range."""
    num_steps = 4
    phase = Phase(lr=linspace(1, 0, num_steps), momentum=cosinespace(.8, 1, num_steps))
    # Iterating the phase produces exactly one parameter dict per step.
    self.assertEqual(len(list(phase)), num_steps)
    for parameters in phase:
        self.assertEqual(len(parameters), 2)
        self.assertTrue("lr" in parameters)
        self.assertTrue(0 <= parameters["lr"] <= 1)
        self.assertTrue("momentum" in parameters)
        self.assertTrue(.8 <= parameters["momentum"] <= 1)
def test_phase_with_only_one_parameter_set(self):
    """A phase built with a single parameter yields one-entry dicts holding
    only that parameter, with values staying inside the configured span."""
    num_steps = 3
    for name in ["lr", "momentum"]:
        single = Phase(**{name: linspace(1, 0, num_steps)})
        for parameters in single:
            self.assertIsInstance(parameters, dict)
            self.assertTrue(name in parameters)
            self.assertEqual(len(parameters), 1)
            self.assertTrue(0 <= parameters[name] <= 1)
def test_init_raises_without_lr_or_momentum(self):
    """Phase must refuse construction when neither lr nor momentum is supplied."""
    # Both the explicit-None and the no-argument spellings are invalid.
    for build in (lambda: Phase(lr=None, momentum=None), lambda: Phase()):
        with self.assertRaises(ValueError):
            build()
# NOTE(review): duplicate definition — an identical test with this exact name
# also appears earlier in this class; Python keeps only the last definition,
# so one of the two copies should be removed (or renamed if both are wanted).
def test_init_raises_without_lr_or_momentum(self):
    """Phase must refuse construction when neither lr nor momentum is supplied."""
    with self.assertRaises(ValueError):
        Phase(lr=None, momentum=None)
    with self.assertRaises(ValueError):
        Phase()
Args:
base_cycle_length (int): number of steps for the first cycle.
cycles (int): the number of repetitions.
lr (Tuple[float, float]): tuple for the learning rate for one cycle: (start, end).
cycle_mult (float): multiply the last cycle length with this every cycle. The length of a cycle
grows exponentially.
Returns:
A list of configured :class:`~poutyne.framework.callbacks.policies.Phase` instances.
References:
`SGDR: Stochastic Gradient Descent with Warm Restarts
<https://arxiv.org/abs/1608.03983>`_
"""
steps = [base_cycle_length * (cycle_mult**i) for i in range(cycles)]
return [Phase(lr=cosinespace(lr[0], lr[1], step)) for step in steps]