#!/usr/bin/env python
"""Run the fits for the NIST StRD models using plain scipy.optimize,
relying on no code from lmfit.  In fact, this goes right down to
calling scipy.optimize._minpack._lmdif() directly.

The tests check only the best-fit values, not the estimated
uncertainties.  Currently, not all tests pass.
"""
from __future__ import print_function
import sys
import math
import numpy as np
from scipy.optimize import _minpack
from NISTModels import Models, ReadNistData
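# Note: _minpack._lmdif is the private MINPACK wrapper that
# scipy.optimize.leastsq calls internally; the positional argument order
# used below is (func, x0, args, full_output, ftol, xtol, gtol, maxfev,
# epsfcn, factor, diag).  Being private, it may change between scipy
# versions.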
try:
    import matplotlib
    matplotlib.use('WXAgg')
    import pylab
    HASPYLAB = True
except ImportError:
    HASPYLAB = False
def ndig(a, b):
    """Number of significant digits to which a agrees with b."""
    # the max() guards against log10(0) when the values match exactly
    return int(0.5 - math.log10(max(abs(abs(a)-abs(b))/abs(b), 1.e-200)))
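# For example, ndig(1.0000234, 1.0) = int(0.5 - log10(2.34e-5)) = 5:
# the two values agree to about five significant digits.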
def Compare_NIST_Results(DataSet, vals, NISTdata):
    """Print a table comparing fitted to certified values; return the
    worst number of matching digits over all parameters."""
    print(' %s: ' % DataSet)
    print(' | Parameter Name |  Value Found   | Certified Value  | # Matching Digits |')
    print(' |----------------+----------------+------------------+-------------------|')
    val_dig_min = 1000
    for i in range(NISTdata['nparams']):
        parname = 'b%i' % (i+1)
        thisval = vals[i]
        certval = NISTdata['cert_values'][i]
        vdig = ndig(thisval, certval)
        pname = (parname + ' value ' + ' '*14)[:14]
        print(' | %s | % -.7e | % -.7e   | %2i                |' % (pname, thisval, certval, vdig))
        val_dig_min = min(val_dig_min, vdig)
    print(' |----------------+----------------+------------------+-------------------|')
    print(' Worst agreement: %i digits for value' % (val_dig_min))
    return val_dig_min
def NIST_Test(DataSet, start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']
    # starting values for parameters b1..bN, from the chosen start point
    vals = [NISTdata[start][i] for i in range(npar)]
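    # These tolerances are far tighter than scipy's defaults (~1.5e-8
    # for ftol and xtol), to push the fit as close as possible to the
    # certified values.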
    maxfev = 2500 * (npar + 1)
    factor = 100
    xtol = 1.e-14
    ftol = 1.e-14
    epsfcn = 1.e-13
    gtol = 1.e-14
    diag = None
    print(" Fit with: ", factor, xtol, ftol, gtol, epsfcn, diag)
    _best, out, ier = _minpack._lmdif(resid, vals, (x, y), 1,
                                      ftol, xtol, gtol,
                                      maxfev, epsfcn, factor, diag)
    digs = Compare_NIST_Results(DataSet, _best, NISTdata)
    if plot and HASPYLAB:
        # resid is defined so that -resid(b, x), with y omitted, gives
        # the fitted model values
        fit = -resid(_best, x)
        pylab.plot(x, y, 'ro')
        pylab.plot(x, fit, 'k+-')
        pylab.show()
    return digs > 2
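
# For reference, a sketch of the same fit through the public
# scipy.optimize.leastsq API, which wraps the same _lmdif routine used
# above.  This helper is hypothetical (not part of the original script)
# and is never called below.
def NIST_Test_leastsq(DataSet, start='start2'):
    from scipy.optimize import leastsq
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    vals = [NISTdata[start][i] for i in range(npar)]
    # same tolerances and evaluation budget as NIST_Test above
    _best, ier = leastsq(resid, vals, args=(NISTdata['x'], NISTdata['y']),
                         ftol=1.e-14, xtol=1.e-14, gtol=1.e-14,
                         maxfev=2500*(npar + 1), epsfcn=1.e-13, factor=100)
    return Compare_NIST_Results(DataSet, _best, NISTdata)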
msg1 = """
----- NIST StRD Models -----
Select one of the Models listed below:
and a starting point of 'start1' or 'start2'
"""
msg2 = """
That is, use
python fit_NIST.py Bennett5 start1
or go through all models and starting points with:
python fit_NIST.py all
"""
if __name__ == '__main__':
    dset = 'Bennett5'
    start = 'start2'
    if len(sys.argv) < 2:
        print(msg1)
        out = ''
        for d in sorted(Models.keys()):
            out = out + ' %s ' % d
            if len(out) > 55:
                print(out)
                out = ''
        print(out)
        print(msg2)
        sys.exit()
    if len(sys.argv) > 1:
        dset = sys.argv[1]
    if len(sys.argv) > 2:
        start = sys.argv[2]
    if dset.lower() == 'all':
        tpass = 0
        tfail = 0
        failures = []
        dsets = sorted(Models.keys())
        for dset in dsets:
            for start in ('start1', 'start2'):
                if NIST_Test(dset, start=start, plot=False):
                    tpass += 1
                else:
                    tfail += 1
                    failures.append(" %s (starting at '%s')" % (dset, start))
        print('--------------------------------------')
        print(' Final Results: %i pass, %i fail.' % (tpass, tfail))
        print(' Tests Failed for:\n %s' % '\n '.join(failures))
        print('--------------------------------------')
    else:
        NIST_Test(dset, start=start, plot=True)