## File: stat.inc

# Source: Debian package gnuplot 3.5beta6.347-3 (demo/stat.inc)
# $Id: stat.inc,v 1.3 1998/04/14 00:16:54 drd Exp $
#
# Library of Statistical Functions version 3.0
#
# Permission granted to distribute freely for non-commercial purposes only
#
# Copyright (c) 1991, 1992 Jos van der Woude, jvdwoude@hut.nl

# If you don't have the gamma() and/or lgamma() functions in your library,
# you can use the following recursive definitions. They are correct for all
# values i / 2 with i = 1, 2, 3, ... This is sufficient for most statistical
# needs.
#logsqrtpi = log(sqrt(pi))
#lgamma(x) = (x<=0.5)?logsqrtpi:((x==1)?0:log(x-1)+lgamma(x-1))
#gamma(x) = exp(lgamma(x))

# If you have the lgamma() function compiled into gnuplot, you can use
# alternate definitions for some PDFs. For larger arguments this will result
# in more efficient evaluation. Just uncomment the definitions containing the
# string `lgamma', while at the same time commenting out the originals.
# NOTE: In these cases the recursive definition for lgamma() is NOT sufficient!

# Some PDFs have alternate definitions of a recursive nature. I suppose these
# are not really very efficient, but I find them aesthetically pleasing to the
# brain.
# Define useful constants fourinvsqrtpi=4.0/sqrt(pi) invpi=1.0/pi invsqrt2pi=1.0/sqrt(2.0*pi) log2=log(2.0) sqrt2=sqrt(2.0) sqrt2invpi=sqrt(2.0/pi) twopi=2.0*pi # define variables plus default values used as parameters in PDFs # some are integers, others MUST be reals a=1.0 alpha=0.5 b=2.0 df1=1 df2=1 g=1.0 lambda=1.0 m=0.0 mm=1 mu=0.0 nn=2 n=2 p=0.5 q=0.5 r=1 rho=1.0 sigma=1.0 u=1.0 # #define 1.0/Beta function # Binv(p,q)=exp(lgamma(p+q)-lgamma(p)-lgamma(q)) # #define Probability Density Functions (PDFs) # # NOTE: # The discrete PDFs are calulated for all real values, using the int() # function to truncate to integers. This is a monumental waste of processing # power, but I see no other easy solution. If anyone has any smart ideas # about this, I would like to know. Setting the sample size to a larger value # makes the discrete PDFs look better, but takes even more time. # Arcsin PDF arcsin(x)=invpi/sqrt(r*r-x*x) # Beta PDF beta(x)=Binv(p,q)*x**(p-1.0)*(1.0-x)**(q-1.0) # Binomial PDF #binom(x)=n!/(n-int(x))!/int(x)!*p**int(x)*(1.0-p)**(n-int(x)) bin_s(x)=n!/(n-int(x))!/int(x)!*p**int(x)*(1.0-p)**(n-int(x)) bin_l(x)=exp(lgamma(n+1)-lgamma(n-int(x)+1)-lgamma(int(x)+1)\ +int(x)*log(p)+(n-int(x))*log(1.0-p)) binom(x)=(n<20)?bin_s(x):bin_l(x) # Cauchy PDF cauchy(x)=b/(pi*(b*b+(x-a)**2)) # Chi-square PDF #chi(x)=x**(0.5*df1-1.0)*exp(-0.5*x)/gamma(0.5*df1)/2**(0.5*df1) chi(x)=exp((0.5*df1-1.0)*log(x)-0.5*x-lgamma(0.5*df1)-df1*0.5*log2) # Erlang PDF erlang(x)=lambda**n/(n-1)!*x**(n-1)*exp(-lambda*x) # Extreme (Gumbel extreme value) PDF extreme(x)=alpha*(exp(-alpha*(x-u)-exp(-alpha*(x-u)))) # F PDF f(x)=Binv(0.5*df1,0.5*df2)*(df1/df2)**(0.5*df1)*x**(0.5*df1-1.0)/\ (1.0+df1/df2*x)**(0.5*(df1+df2)) # Gamma PDF #g(x)=lambda**rho*x**(rho-1.0)*exp(-lambda*x)/gamma(rho) g(x)=exp(rho*log(lambda)+(rho-1.0)*log(x)-lgamma(rho)-lambda*x) # Geometric PDF #geometric(x)=p*(1.0-p)**int(x) geometric(x)=exp(log(p)+int(x)*log(1.0-p)) # Half normal PDF 
halfnormal(x)=sqrt2invpi/sigma*exp(-0.5*(x/sigma)**2) # Hypergeometric PDF hypgeo(x)=(int(x)>mm||int(x)mu)?0.5:-0.5)*igamma(0.5,0.5*((x-mu)/sigma)**2) # Pareto CDF cpareto(x)=x