tools_stats.py

Example of using the statistical routines in the `msparser` toolkit's `ms_quant_stats` class.

```python
#!/usr/bin/python
##############################################################################
# file: tools_stats.py                                                       #
# 'msparser' toolkit example code                                            #
##############################################################################
# COPYRIGHT NOTICE                                                           #
#                                                                            #
##############################################################################
#    \$Source: /vol/cvsroot/parser/examples/test_python/tools_stats.py,v \$    #
#     \$Author: neilr \$                                                    #
#       \$Date: 2016/04/22 14:16:29 \$                                         #
#   \$Revision: 1.2 \$                                                         #
# \$NoKeywords::                                                            \$ #
##############################################################################

import msparser

# Inclusive counterpart of the built-in range(): yields every value from
# 'start' up to AND including 'stop', matching the range semantics of
# other languages such as Perl and C# used by the other msparser examples.
def ms_range(start, stop, step=1):
    """Yield start, start+step, ... while the value is <= stop."""
    value = start
    while True:
        if value > stop:
            return
        yield value
        value = value + step

# Demonstrate the basic distribution functions in ms_quant_stats.
# Bind the class once; every call below goes through this alias.
stats = msparser.ms_quant_stats

print("\n")
print("Bionominal coefficients for sample size 6:")

# NOTE(review): ms_range() is inclusive, so this prints coefficients for
# k = 0..6, while the sample output at the bottom of the file lists only
# k = 0..5 -- confirm which is intended.
for k in ms_range(0, 6):
    print("\t6 choose %d = %f" % (k, stats.binomialCoefficient(6, k)))

print("\n")
print("First 10 values of the Poisson(lambda = 1) distribution:")

for k in ms_range(0, 10):
    print("\tPoisson(lambda 1, k %d) = %f" % (k, stats.poissonDensity(1, k)))

print("\n")
print("Common values of the standard normal distribution:")

for prob in [0.001, 0.01, 0.025, 0.05, 0.1, 0.5, 0.9, 0.95, 0.975, 0.99, 0.999]:
    # normalCriticalValue() accepts an argument between 0.5 and 1.0 only;
    # that is, it returns only the right tail critical values, so
    # left-tail probabilities are reflected first.
    critval = stats.normalCriticalValue(1 - prob if prob < 0.5 else prob)
    print("\tNormal-1(%f) = %f" % (prob, critval))

print("\n")
for x in [-3.09, -2.32, -1.96, -1.64, -1.28, 0, 1.28, 1.64, 1.96, 2.32, 3.09]:
    # normalCumulativeProbability() takes an argument between 0 and 4.09;
    # that is, it reports only the right tail probability, so negative x
    # values are mirrored around 0.5.
    if x < 0:
        p = 0.5 - stats.normalCumulativeProbability(-x)
    else:
        p = 0.5 + stats.normalCumulativeProbability(x)
    print("\tNormal(x <= %f) = %f" % (x, p))

print("\n")
print("Common values of the chi-square distribution at 10 degrees of freedom:")

# Note the significance levels the chi-square functions support.
for sig in [0.001, 0.01, 0.025, 0.05, 0.1]:
    lowcrit = stats.chisqLowerCriticalValue(10, sig)
    upcrit = stats.chisqUpperCriticalValue(10, sig)
    print("\tchi-sq(df 10, sigLevel %f) = <%f, %f>" % (sig, lowcrit, upcrit))

print("\n")
print("Common values of the Student's t distribution at 10 degrees of freedom:")
# Note the significance levels the Student's t function supports.
for sig in [0.001, 0.005, 0.01, 0.025, 0.05, 0.1]:
    print("\tt(df 10, sigLevel %f) = %f" % (sig, stats.studentsCriticalValue(10, sig)))

print("\n")
print("Common values of the F distribution at 10 and 1 degrees of freedom:")
# Note the significance levels the F function supports.
for sig in [0.01, 0.05, 0.1]:
    print("\tF(df1 10, df2 1, sigLevel %f) = %f" % (sig, stats.snedecorsCriticalValue(10, 1, sig)))

print("\n")

# Test samples for the summary-statistics section below. Each entry is a
# triple: [label, hypothesised population mean, list of observations].
# The hypothesised mean (element 1) is fed to the normal mean/median
# hypothesis tests; the observations (element 2) are copied into
# msparser.vectord objects by the driver loop.
samples = [
    # Uniform data generated with
    #   perl -e 'print join "\n", map {+ rand() } 0 .. 9'
    ['uniform(0,1)', 0.5, [0.400313346021907, 0.635154745166318, 0.795328049493161, 0.465079196585926, 0.709667388012424, 0.76161797692474, 0.617230566355229, 0.282583560369936, 0.251706165020014, 0.2962014901575]],
    # Normal(5, 2.5) data generated in R with
    # > rnorm(10, 5, 2.5)
    ['normal(5,2.5)', 5, [3.1476833, 9.2061900, 2.6079399, 4.4361054, 6.4133568, 4.3745899, 3.5588195, 3.0542840, 0.6643241, 4.3697818]],
    # Beta(0.5, 0.5) data generated in R with
    # > rbeta(10, 0.5, 0.5)
    ['beta(0.5,0.5)', 0.5, [0.754152463, 0.008992998, 0.221637853, 0.426146895, 0.656421042, 0.224818243, 0.022050868, 0.539143737, 0.877643550, 0.128030530]]
]

# Auxiliary helper functions:
def aux_helper(vec, vec_sorted, hypothesised_mean=None):
    """Print summary statistics and simple normality tests for one sample.

    vec               -- msparser.vectord of sample values (any order)
    vec_sorted        -- the same values sorted ascending; required by
                         sortedMedian(), testShapiroWilkW() and
                         detectOutliers()
    hypothesised_mean -- population mean used in the N(mean, sigma^2)
                         hypothesis tests. Defaults to the module-global
                         sample[1] for backward compatibility with the
                         driver loop below, which the original code
                         relied on implicitly.
    """
    if hypothesised_mean is None:
        # Backwards-compatible fallback: the original implementation read
        # the loop variable 'sample' straight from module scope.
        hypothesised_mean = sample[1]

    print("\tSum = %g" %(msparser.ms_quant_stats.sum(vec)))
    print("\tSum of squares = %g" %(msparser.ms_quant_stats.sumsq(vec)))
    print("\tLog transformed = [%s]" %str((msparser.ms_quant_stats.logTransform(vec))))
    print("\tExp transformed = [%s]" %str((msparser.ms_quant_stats.expTransform(vec))))

    # Basic sample statistics:

    xbar = msparser.ms_quant_stats.arithmeticMean(vec)
    stdev = msparser.ms_quant_stats.arithmeticStandardDeviation(vec, xbar)
    median = msparser.ms_quant_stats.sortedMedian(vec_sorted)

    # If the vector isn't sorted:
    # median = msparser.ms_quant_stats.unsortedMedian(vec)

    geombar = msparser.ms_quant_stats.geometricMean(vec)
    geomdev = msparser.ms_quant_stats.geometricStandardDeviation(vec, geombar)

    print("\tArithmetic stdev = %g" %(stdev))
    print("\tArithmetic mean = %g" %(xbar))
    print("\tStandard error of mean = %g" %(msparser.ms_quant_stats.arithmeticStandardErrorOfMean(stdev, len(vec))))
    print("\tMedian = %g" %(median))
    print("\tStandard error of median = %g" %(msparser.ms_quant_stats.arithmeticStandardErrorOfMedian(stdev, len(vec))))
    print("\tGeometric mean = %g" %(geombar))
    print("\tGeometric stdev = %g" %(geomdev))

    # Basic distribution tests for normally distributed data:

    print("\tHypothesis: distribution is N(%g, sigma^2) (mean test). p-value = %g" %(hypothesised_mean, msparser.ms_quant_stats.calculateNormalMeanPvalue(hypothesised_mean, xbar, stdev, len(vec))))
    print("\tHypothesis: distribution is N(%g, sigma^2) (median test). p-value = %g" %(hypothesised_mean, msparser.ms_quant_stats.calculateNormalMedianPvalue(hypothesised_mean, median, stdev, len(vec))))

    # (We would expect the normal sample to have the highest p-value, i.e.
    # least evidence against the hypothesis, while the uniform and beta
    # distribution should have fairly low p-values. However, sample size is
    # so small that the tests are not very reliable.)

    ok, W, p = msparser.ms_quant_stats.testShapiroWilkW(vec_sorted)
    # Use 'is None' identity tests (PEP 8) rather than '== None'.
    if W is None:
        W = 'None'
    if p is None:
        p = 'None'
    print("\tShapiro-Wilk ok = %d, W = %s, p-value = %s" %(ok, W, p))

    # Outlier detection can be done even when the above tests indicate that
    # the sample is not normally distributed. However, don't trust the results!

    # Note that the input vector must be sorted for outlier testing to
    # work.
    numL, numH = msparser.ms_quant_stats.detectOutliers(vec_sorted, "auto", 0.05)
    if numL is None:
        numL = 'None'
    if numH is None:
        numH = 'None'
    # numL and numH are the number of elements at each end of vec_sorted
    # which seem to be outliers. That is, elements between 0 and numL-1,
    # and len(vec_sorted)-numH to len(vec_sorted)-1.

    print("\tOutlier detection('auto', 0.05') = <%s, %s>" %(numL, numH))

# Drive the summary-statistics helper over every test sample.
# NOTE: the loop variable must be named 'sample' -- aux_helper() reads
# sample[1] from module scope.
for sample in samples:
    print("Sample '%s' = [%s]" %(sample[0], sample[2]))

    # ms_quant_stats takes as input STL vectors of type double; copy the
    # Python list into an msparser.vectord element by element.
    vec = msparser.vectord()
    for observation in sample[2]:
        vec.append(observation)

    # The Shapiro-Wilk W test (and fast median lookup) need sorted data.
    # The list is sorted in place, as before, then copied into a second
    # vector.
    sample[2].sort()
    vec_sorted = msparser.vectord()
    for observation in sample[2]:
        vec_sorted.append(observation)

    aux_helper(vec, vec_sorted)
    print("\n")

'''
Running the program as

python tools_stats.py

will give the following output under Mascot 2.5:

Bionominal coefficients for sample size 6:
6 choose 0 = 1.000000
6 choose 1 = 6.000000
6 choose 2 = 15.000000
6 choose 3 = 20.000000
6 choose 4 = 15.000000
6 choose 5 = 6.000000

First 10 values of the Poisson(lambda = 1) distribution:
Poisson(lambda 1, k 0) = 0.367879
Poisson(lambda 1, k 1) = 0.367879
Poisson(lambda 1, k 2) = 0.183940
Poisson(lambda 1, k 3) = 0.061313
Poisson(lambda 1, k 4) = 0.015328
Poisson(lambda 1, k 5) = 0.003066
Poisson(lambda 1, k 6) = 0.000511
Poisson(lambda 1, k 7) = 0.000073
Poisson(lambda 1, k 8) = 0.000009
Poisson(lambda 1, k 9) = 0.000001

Common values of the standard normal distribution:
Normal-1(0.001000) = 3.090000
Normal-1(0.010000) = 2.318000
Normal-1(0.025000) = 1.959000
Normal-1(0.050000) = 1.642000
Normal-1(0.100000) = 1.282000
Normal-1(0.500000) = 0.000000
Normal-1(0.900000) = 1.282000
Normal-1(0.950000) = 1.642000
Normal-1(0.975000) = 1.959000
Normal-1(0.990000) = 2.318000
Normal-1(0.999000) = 3.090000

Normal(x <= -3.090000) = 0.001000
Normal(x <= -2.320000) = 0.005540
Normal(x <= -1.960000) = 0.025000
Normal(x <= -1.640000) = 0.050500
Normal(x <= -1.280000) = 0.100270
Normal(x <= 0.000000) = 0.500000
Normal(x <= 1.280000) = 0.899730
Normal(x <= 1.640000) = 0.949500
Normal(x <= 1.960000) = 0.975000
Normal(x <= 2.320000) = 0.994460
Normal(x <= 3.090000) = 0.999000

Common values of the chi-square distribution at 10 degrees of freedom:
chi-sq(df 10, sigLevel 0.001000) = <1.479000, 29.588000>
chi-sq(df 10, sigLevel 0.010000) = <2.558000, 23.209000>
chi-sq(df 10, sigLevel 0.025000) = <3.247000, 20.483000>
chi-sq(df 10, sigLevel 0.050000) = <3.940000, 18.307000>
chi-sq(df 10, sigLevel 0.100000) = <4.865000, 15.987000>

Common values of the Student's t distribution at 10 degrees of freedom:
t(df 10, sigLevel 0.001000) = 4.143000
t(df 10, sigLevel 0.005000) = 3.169000
t(df 10, sigLevel 0.010000) = 2.764000
t(df 10, sigLevel 0.025000) = 2.228000
t(df 10, sigLevel 0.050000) = 1.812000
t(df 10, sigLevel 0.100000) = 1.372000

Common values of the F distribution at 10 and 1 degrees of freedom:
F(df1 10, df2 1, sigLevel 0.010000) = 6055.850000
F(df1 10, df2 1, sigLevel 0.050000) = 241.882000
F(df1 10, df2 1, sigLevel 0.100000) = 60.195000

Sample 'uniform(0,1)' = [[0.400313346021907, 0.635154745166318, 0.795328049493161, 0.465079196585926, 0.709667388012424, 0.76161797692474, 0.617230566355229, 0.282583560369936, 0.251706165020014, 0.2962014901575]]
Sum = 5.21488
Sum of squares = 3.10813
Log transformed = [(-0.915507673489646, -0.4538866166025002, -0.22900060856873583, -0.7655475726582238, -0.342958886300134, -0.2723101916281718, -0.48251263548864914, -1.2637809832124323, -1.379492883616736, -1.216715346244483)]
Exp transformed = [(1.4922922282212572, 1.8873141716084079, 2.215167562218903, 1.592140275968374, 2.03331484127, 2.1417387024328853, 1.853786986176205, 1.3265526176931346, 1.2862180459002401, 1.3447410816291303)]
Arithmetic stdev = 0.2078
Arithmetic mean = 0.521488
Standard error of mean = 0.065712
Median = 0.541155
Standard error of median = 0.0773299
Geometric mean = 0.480864
Geometric stdev = 1.54969
Hypothesis: distribution is N(0.5, sigma^2) (mean test). p-value = 0.75114
Hypothesis: distribution is N(0.5, sigma^2) (median test). p-value = 0.607474
Shapiro-Wilk ok = 1, W = 0.904955259788, p-value = 0.248103833546
Outlier detection('auto', 0.05') = <0, 0>

Sample 'normal(5,2.5)' = [[3.1476833, 9.20619, 2.6079399, 4.4361054, 6.4133568, 4.3745899, 3.5588195, 3.054284, 0.6643241, 4.3697818]]
Sum = 41.8331
Sum of squares = 222.941
Log transformed = [(1.146666721936465, 2.2198760838959566, 0.9585605993203508, 1.489776829350127, 1.8583828156064155, 1.47581277827312, 1.2694288887485337, 1.1165451952692114, -0.40898514617929754, 1.474713076510773)]
Exp transformed = [(23.282064495279258, 9958.582289145075, 13.571064284632554, 84.44541928145347, 609.9376873108102, 79.40726795332382, 35.1217114826069, 21.205996596360794, 1.9431766842547191, 79.02638626067342)]
Arithmetic stdev = 2.30796
Arithmetic mean = 4.18331
Standard error of mean = 0.72984
Median = 3.9643
Standard error of median = 0.858876
Geometric mean = 3.5257
Geometric stdev = 2.00172
Hypothesis: distribution is N(5, sigma^2) (mean test). p-value = 0.292114
Hypothesis: distribution is N(5, sigma^2) (median test). p-value = 0.258609
Shapiro-Wilk ok = 1, W = 0.91741836864, p-value = 0.335925333025
Outlier detection('auto', 0.05') = <0, 0>

Sample 'beta(0.5,0.5)' = [[0.754152463, 0.008992998, 0.221637853, 0.426146895, 0.656421042, 0.224818243, 0.022050868, 0.539143737, 0.87764355, 0.12803053]]
Sum = 3.85904
Sum of squares = 2.3588
Log transformed = [(-0.28216072584468, -4.7113090044449795, -1.506710521909531, -0.8529711682072173, -0.42095286360724493, -1.4924630121200262, -3.8144033127847012, -0.6177730701548017, -0.13051474727786935, -2.055486527877849)]
Exp transformed = [(2.1258090578036466, 1.0090335564961697, 1.2481192933509702, 1.5313457056396895, 1.9278801704488464, 1.252095118456305, 1.0222957852898442, 1.7145381378978928, 2.4052252302395134, 1.1365877021899178)]
Arithmetic stdev = 0.310837
Arithmetic mean = 0.385904
Standard error of mean = 0.0982953
Median = 0.325483
Standard error of median = 0.115674
Geometric mean = 0.204237
Geometric stdev = 4.71093
Hypothesis: distribution is N(0.5, sigma^2) (mean test). p-value = 0.275606
Hypothesis: distribution is N(0.5, sigma^2) (median test). p-value = 0.165651
Shapiro-Wilk ok = 1, W = 0.930482521074, p-value = 0.452655082151
Outlier detection('auto', 0.05') = <0, 0>

'''
```