How to use the statistics.StatisticsError exception in Python's statistics module

To help you get started, we’ve selected a few statistics.StatisticsError examples, based on popular ways it is used in public projects.

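Before diving into the project snippets below, here is a minimal sketch of when the exception appears: most functions in the statistics module raise StatisticsError when given too little data (for example, mean() of an empty sequence or stdev() of a single value), and the usual pattern is to catch it and fall back to a sensible default. The data and default below are made up for illustration.

import statistics

data = []  # e.g. no measurements collected yet
try:
    average = statistics.mean(data)
except statistics.StatisticsError:
    # mean() requires at least one data point
    average = 0.0
print(average)  # 0.0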

github lschoe / mpyc / tests / test_statistics.py
    def test_statistics_error(self):
        self.assertRaises(statistics.StatisticsError, mean, [])
        self.assertRaises(statistics.StatisticsError, variance, [0])
        self.assertRaises(statistics.StatisticsError, stdev, [0])
        self.assertRaises(statistics.StatisticsError, pvariance, [])
        self.assertRaises(statistics.StatisticsError, pstdev, [])
        self.assertRaises(statistics.StatisticsError, mode, [])
        self.assertRaises(statistics.StatisticsError, median, [])
github cltl / pepper / test / object_properties / util.py
    r_list, g_list, b_list = separate_rgb_channels(rgb_array)
    mean_rgb = [mean(r_list), mean(g_list), mean(b_list)]
    median_rgb = [median(r_list), median(g_list), median(b_list)]

    try:
        mode_r = mode(r_list)
    except statistics.StatisticsError:
        mode_r = manual_mode(r_list)
    try:
        mode_g = mode(g_list)
    except statistics.StatisticsError:
        mode_g = manual_mode(g_list)
    try:
        mode_b = mode(b_list)
    except statistics.StatisticsError:
        mode_b = manual_mode(b_list)

    mode_rgb = [mode_r, mode_g, mode_b]

    return mean_rgb, mode_rgb, median_rgb
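The snippet above repeats the same try/except around mode() once per colour channel: before Python 3.8, statistics.mode() raises StatisticsError when the data has no unique most-common value (since 3.8 it only raises for empty input). As a rough sketch, the fallback could be folded into one helper; safe_mode below is a hypothetical name, and collections.Counter stands in for the project's manual_mode(), whose implementation is not shown here.

import statistics
from collections import Counter

def safe_mode(values):
    # Hypothetical helper: return the statistical mode, falling back to the
    # first most-common value when mode() cannot pick a unique one.
    try:
        return statistics.mode(values)
    except statistics.StatisticsError:
        return Counter(values).most_common(1)[0][0]

print(safe_mode([3, 3, 7, 7, 1]))  # 3 either way: directly on 3.8+, via the fallback on older versions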
github s0md3v / Bolt / bolt.py
        for each in result:
            score = each[1]
            if score == 100 and not sameTokenRemoved:
                sameTokenRemoved = True
                continue
            scores.append(score)
        average = statistics.mean(scores)
        averages.append(average)
    return statistics.mean(averages)


try:
    similarity = fuzzy(allTokens)
    print ('%s Tokens are %s%i%%%s similar to each other on average' %
           (info, green, similarity, end))
except statistics.StatisticsError:
    print ('%s No CSRF protection to test' % bad)
    quit()


def staticParts(allTokens):
    strings = list(set(allTokens.copy()))
    commonSubstrings = {}
    for theString in strings:
        strings.remove(theString)
        for string in strings:
            commonSubstring = longestCommonSubstring(theString, string)
            if commonSubstring not in commonSubstrings:
                commonSubstrings[commonSubstring] = []
            if len(commonSubstring) > 2:
                if theString not in commonSubstrings[commonSubstring]:
                    commonSubstrings[commonSubstring].append(theString)
github twschiller / open-synthesis / openach / metrics.py
    :param evaluations: an iterable of iterables of Eval for a piece of evidence
    """
    # The "diagnosticity" needs to capture how well the evidence separates/distinguishes the hypotheses. If we don't
    # show a preference between consistent/inconsistent, STDDEV captures this intuition OK. However, in the future,
    # we may want to favor evidence for which hypotheses are inconsistent. Additionally, we may want to calculate
    # "marginal diagnosticity" which takes into the rest of the evidence.
    # (1) calculate the consensus for each hypothesis
    # (2) map N/A to neutral because N/A doesn't help determine consistency of the evidence
    # (3) calculate the population standard deviation of the evidence. It's more reasonable to consider the set of
    #     hypotheses at a given time to be the population of hypotheses than as a "sample" (although it doesn't matter
    #     much because we're comparing across hypothesis sets of the same size)
    na_neutral = map(mean_na_neutral_vote, evaluations)  # pylint: disable=bad-builtin
    try:
        return statistics.pstdev(filter(None.__ne__, na_neutral))  # pylint: disable=bad-builtin
    except statistics.StatisticsError:
        return 0.0
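The comment block in the snippet above carries the reasoning: diagnosticity is the population standard deviation of the per-hypothesis consensus values, with N/A votes mapped to neutral and missing values filtered out. A minimal sketch of just the error-handling part, using made-up consensus values in place of the project's mean_na_neutral_vote() output:

import statistics

consensus = [None, 1.0, -1.0, 0.5]  # hypothetical per-hypothesis consensus values
try:
    diagnosticity = statistics.pstdev(v for v in consensus if v is not None)
except statistics.StatisticsError:
    # pstdev() needs at least one data point; with no evaluations there is no spread
    diagnosticity = 0.0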
github itdaniher / rtlsdr-rds-demod / fm.py
                self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
                self.cur_state[curr_AB[group_type]][char_offset+1] = block['B1']
            if (char_offset is not None) and (blkid == "D")  and (group_type == 0) and (block_version == 'A'):
                self.cur_state[curr_AB[group_type]][char_offset+10] = block['B0']
                self.cur_state[curr_AB[group_type]][char_offset+11] = block['B1']
            if group_type in (0,2):
                #print(blkid, group_type, curr_AB[group_type], block_version)
                print(' '.join([str(x) for x in block.values()]))
            #print('\n'.join([''.join(x) for x in self.prog_name]))
            if blkid == "D":
                print('\n'.join([''.join(x) for x in self.cur_state]).replace('\r','╳'))
                group_type = None
                char_offset = None
                try:
                    self.PI = hex(statistics.mode(self.PIs))[2:]
                except statistics.StatisticsError:
                    self.PI = hex(self.PIs[0])[2:]
                self.callsign = picode.rdscall(self.PI)
                print(self.callsign)
github sglebs / srccheck / utilities / csvkaloi.py
            last_processed_metric = metric  # fix for #21, to reuse values
            last_all_values = all_values  # fix for #21, to reuse values
            last_max_value_found = max_value_found
        else: # stats, compute on the whole population
            if adjusted_metric == last_processed_metric: # fix for #21 - reuses values, thanks to sorting we know the pure metric must have come just before
                all_values = last_all_values
                max_value_found = last_max_value_found
            else:
                all_values = [value for entity, value in metric_values()]
                last_processed_metric = adjusted_metric  # fix for #21, in case only stats functions are used, not the pure one.
                last_all_values = all_values  # fix for #21, same as above
            stats_value = stats_cache.get(adjusted_metric, {}).get(lambda_name, None) # fix for #22 - used cached value for stats
            if stats_value is None:
                try:
                    stats_value = lambda_stats(all_values)
                except statistics.StatisticsError as se:
                    print ("ERROR in %s: %s" % (metric, se))
                    continue

            highest_values_found_by_metric[metric] = stats_value
            if stats_value > max_allowed_value:  # we found a violation
                violation_count = violation_count + 1
                print("STATS threshold violation for '%s': %f > %f" % (metric, stats_value, max_allowed_value))
            else:
                if bool(cmdline_arguments["--showHighest"]):
                    print("...........................................")
                    print("INFO(STATS): %s = %s (violation threshold is %s):" % (metric, stats_value, max_allowed_value))
                    print("...........................................")
            #if mean is not None:
            #    stats_cache[metric] = {"AVG": mean, "MEDIAN": median, "STDEV": pstdev} # fix for #22 - used cached value for stats
            #if verbose:
            #    print("Saved %s" % file_name)
github jessamynsmith / eggtimer-server / periods / models.py
    def cycle_length_mode(self):
        try:
            return self._get_statistics_value(statistics.mode)
        except statistics.StatisticsError:
            return None
github samuelcolvin / pydantic / benchmarks / run.py
def stdev(d):
    try:
        return stdev_(d)
    except StatisticsError:
        return 0
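The wrapper above presumably aliases the standard-library function (something like from statistics import stdev as stdev_), since stdev() raises StatisticsError for fewer than two data points; the benchmark reports 0 in that case instead of failing. A self-contained sketch under that assumption:

from statistics import StatisticsError, stdev as stdev_  # assumed alias

def stdev(d):
    try:
        return stdev_(d)
    except StatisticsError:
        # fewer than two samples: report no spread instead of raising
        return 0

print(stdev([2.0, 4.0, 4.0]))  # ~1.15
print(stdev([3.0]))            # 0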
github PythonBalkan / pyconbalkan / pyconbalkan / cfp / models.py
    def rating(self):
        try:
            return median(self.ratings.all().values_list('mark', flat=True))
        except StatisticsError:
            return "N/A"