def test_get_roulette(self, num: int, expected: str) -> None:
    np.random.seed(24)
    archive = {"a": Value(0), "b": Value(1), "c": Value(2), "d": Value(3)}
    output = mutations.get_roulette(archive, num)
    np.testing.assert_equal(output, expected)
def test_pruning() -> None:
    archive = utils.Archive[utils.Value]()
    for k in range(3):
        value = utils.Value(float(k))
        archive[(float(k),)] = value
    value = utils.Value(1.)
    value.add_evaluation(1.)
    archive[(3.,)] = value
    # pruning
    pruning = utils.Pruning(min_len=1, max_len=3)
    # 0 is best optimistic and average, and 3 is best pessimistic (variance=0)
    with pytest.warns(UserWarning):
        archive = pruning(archive)
    testing.assert_set_equal([x[0] for x in archive.keys_as_array()], [0, 3], err_msg=f"Repetition #{k+1}")
    # should not change anything this time
    archive = pruning(archive)
    testing.assert_set_equal([x[0] for x in archive.keys_as_array()], [0, 3], err_msg=f"Repetition #{k+1}")
def test_get_nash() -> None:
    zeroptim = Zero(instrumentation=1, budget=4, num_workers=1)
    for k in range(4):
        array = (float(k),)
        zeroptim.archive[array] = utils.Value(k)
        zeroptim.archive[array].count += (4 - k)
    nash = utils._get_nash(zeroptim)
    testing.printed_assert_equal(nash, [((2,), 3), ((1,), 4), ((0,), 5)])
    np.random.seed(12)
    output = utils.sample_nash(zeroptim)
    np.testing.assert_equal(output, (2,))
def _update_archive_and_bests(self, x: ArrayLike, value: float) -> None:
    if not isinstance(value, (Real, float)):  # using "float" along with "Real" because mypy does not understand "Real" for now (Issue #3186)
        raise TypeError(f'"tell" method only supports float values but the passed value was: {value} (type: {type(value)}).')
    if np.isnan(value) or value == np.inf:
        warnings.warn(f"Updating fitness with {value} value")
    if x not in self.archive:
        self.archive[x] = utils.Value(value)  # better not to store the position as a Point (memory)
    else:
        self.archive[x].add_evaluation(value)
    # update current best records
    # this may have to be improved if we want to keep more kinds of best values
    for name in ["optimistic", "pessimistic", "average"]:
        if np.array_equal(x, self.current_bests[name].x):  # reboot
            y: bytes = min(self.archive.bytesdict, key=lambda z, n=name: self.archive.bytesdict[z].get_estimation(n))  # type: ignore
            # rebuild the best point: the best may have changed, and the previous Point did not track the updated value anyway
            self.current_bests[name] = utils.Point(np.frombuffer(y), self.archive.bytesdict[y])
        else:
            if self.archive[x].get_estimation(name) <= self.current_bests[name].get_estimation(name):
                self.current_bests[name] = utils.Point(x, self.archive[x])
        if not (np.isnan(value) or value == np.inf):
            assert self.current_bests[name].x in self.archive, "Best value should exist in the archive"
    if self.pruning is not None:
        self.archive = self.pruning(self.archive)
self.instrumentation = (
    instrumentation
    if isinstance(instrumentation, instru.Instrumentation)
    else instru.Instrumentation(instru.var.Array(instrumentation))
)
if not self.dimension:
    raise ValueError("No variable to optimize in this instrumentation.")
self.create_candidate = CandidateMaker(self.instrumentation)
self.name = self.__class__.__name__ # printed name in repr
# keep a record of evaluations, and current bests which are updated at each new evaluation
self.archive: utils.Archive[utils.Value] = utils.Archive() # dict like structure taking np.ndarray as keys and Value as values
self.current_bests = {
    x: utils.Point(np.zeros(self.dimension, dtype=float), utils.Value(np.inf)) for x in ["optimistic", "pessimistic", "average"]
}
# pruning function, called at each "tell"
# this can be deactivated or modified by each implementation
self.pruning: Optional[Callable[[utils.Archive[utils.Value]], utils.Archive[utils.Value]]] = utils.Pruning.sensible_default(
    num_workers=num_workers, dimension=self.instrumentation.dimension
)
# instance state
self._asked: Set[str] = set()
self._suggestions: Deque[Candidate] = deque()
self._num_ask = 0
self._num_tell = 0
self._num_tell_not_asked = 0
self._callbacks: Dict[str, List[Any]] = {}
# to make the optimize function stoppable halfway through
self._running_jobs: List[Tuple[Candidate, JobLike[float]]] = []
self._finished_jobs: Deque[Tuple[Candidate, JobLike[float]]] = deque()
def compare(self, winners: List[Candidate], losers: List[Candidate]) -> None:
    # This means that for any i and j, winners[i] is better than winners[i+1], and better than losers[j].
    # This is for cases in which we do not know fitness values, we just know comparisons.
    # Evaluate the best fitness value among losers.
    best_fitness_value = 0.
    for l in losers:
        if l.data in self.archive:
            best_fitness_value = min(best_fitness_value, self.archive[l.data].get_estimation("average"))
    # Now let us decide the fitness value of winners.
    for i, w in enumerate(winners):
        self.tell(w, best_fitness_value - len(winners) + i)
        self.archive[w.data] = utils.Value(best_fitness_value - len(winners) + i)
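# A minimal standalone sketch (not nevergrad's API) of the fitness values that "compare"
# assigns above: each winner is told a value below the best loser's average fitness,
# ordered so that earlier winners receive lower (better) values since nevergrad minimizes.
# The helper name "synthetic_fitness" is hypothetical, introduced only for illustration.
from typing import List

def synthetic_fitness(best_loser_fitness: float, num_winners: int) -> List[float]:
    # winner i receives best_loser_fitness - num_winners + i, mirroring the tell() call above
    return [best_loser_fitness - num_winners + i for i in range(num_winners)]

# with 3 winners and a best loser fitness of 0.0, the winners receive -3.0, -2.0, -1.0
assert synthetic_fitness(0.0, 3) == [-3.0, -2.0, -1.0]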
def add_evaluation(self, y: float) -> None:
    """Adds a new evaluation of the value.

    Parameter
    ---------
    y: float
        the new evaluation
    """
    self.mean = (self.count * self.mean + y) / float(self.count + 1)
    self.square = (self.count * self.square + y * y) / float(self.count + 1)
    self.square = max(self.square, self.mean**2)
    self.count += 1
    factor: float = np.sqrt(float(self.count) / float(self.count - 1.))
    self.variance = factor * (self.square - self.mean**2)
def __repr__(self) -> str:
    return "Value<mean: {}, count: {}>".format(self.mean, self.count)
class Point(Value):
    """Coordinates and estimation of a point in space.
    This class provides easy access to:
    - x: the coordinates of the point
    - count: how many times the point was evaluated
    - mean: the mean value
    - square: the mean square value
    - variance: the variance
    It also provides access to optimistic and pessimistic bounds for the value.

    Parameters
    ----------
    x: array-like
        the coordinates
    value: Value
        the value estimation instance