File: optimpack.h

/*
 * optimpack.h --
 *
 *	Definitions for optimization routines implemented in OptimPack
 *	library.
 *
 *-----------------------------------------------------------------------------
 *
 *      Copyright (c) 2003, Eric THIEBAUT.
 *
 *	This file is part of OptimPack.
 *
 *	OptimPack is  free software; you can redistribute  it and/or modify
 *	it under the  terms of the GNU General  Public License as published
 *	by the Free  Software Foundation; either version 2  of the License,
 *	or (at your option) any later version.
 *
 *	OptimPack is  distributed in the hope  that it will  be useful, but
 *	WITHOUT  ANY  WARRANTY;  without   even  the  implied  warranty  of
 *	MERCHANTABILITY or  FITNESS FOR A PARTICULAR PURPOSE.   See the GNU
 *	General Public License for more details.
 *
 *	You should have  received a copy of the  GNU General Public License
 *	along with OptimPack (file  "LICENSE" in the top source directory);
 *	if  not, write  to the  Free Software  Foundation, Inc.,  59 Temple
 *	Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *-----------------------------------------------------------------------------
 *
 *	$Id$
 *	$Log$
 *
 *-----------------------------------------------------------------------------
 */

#ifndef _OPTIMPACK_H
#define _OPTIMPACK_H 1

#include <stddef.h> /* for size_t, used by the OP_OFFSET_OF macro */

/* Customizable data types:
 *   OP_INTEGER = data type used to store array indices
 *   OP_LOGICAL = data type of the result of a logical test
 */
#ifndef OP_INTEGER
# define OP_INTEGER int
#endif
#ifndef OP_LOGICAL
# define OP_LOGICAL int
#endif


/* Values returned by OptimPack routines: */
#define OP_ERROR 1
#define OP_OK    0

#define OP_TRUE  1
#define OP_FALSE 0

/*---------------------------------------------------------------------------*/
/* USEFUL MACROS */

/* OP_STRINGIFY expands its argument into a string literal (wraps it in ""
   double quotation marks); OP_CONCAT and its variants concatenate their
   arguments into a single token. */
#ifdef __STDC__
# define OP_STRINGIFY(x)     #x
# define OP_CONCAT(a,b)      a##b
# define OP_CONCAT2(a,b)     a##b
# define OP_CONCAT3(a,b,c)   a##b##c
# define OP_CONCAT4(a,b,c,d) a##b##c##d
#else
# define OP_STRINGIFY(x)     "x"
# define OP_CONCAT(a,b)      a/**/b
# define OP_CONCAT2(a,b)     a/**/b
# define OP_CONCAT3(a,b,c)   a/**/b/**/c
# define OP_CONCAT4(a,b,c,d) a/**/b/**/c/**/d
#endif
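
/* For instance (with an ANSI C compiler), OP_STRINGIFY(op_csrch) expands
   to the string literal "op_csrch" and OP_CONCAT(op_, dnrm2) expands to
   the single token op_dnrm2.  A hypothetical use (OP_NAME, n and x below
   are only illustrations, not part of the library) is to build identifiers
   from a common prefix:

     #define OP_NAME(suffix) OP_CONCAT(op_, suffix)
     double norm = OP_NAME(dnrm2)(n, x);  // same as op_dnrm2(n, x)
*/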

/* Computes absolute value: */
#define OP_ABS(a)   ((a)>=0?(a):-(a))

/* Computes min/max values: */
#define OP_MIN(a,b) ((a)<=(b)?(a):(b))
#define OP_MAX(a,b) ((a)>=(b)?(a):(b))

/* Computes minimal number of chunks with M elements
   needed to store N elements: */
#define OP_HOW_MANY(n, m) (((n)+((m)-1))/(m))

/* Rounds N elements up to a multiple of M elements: */
#define OP_ROUND_UP(n, m) (OP_HOW_MANY(n, m)*(m))

/* Offset (in bytes) of member M in structure S: */
#define OP_OFFSET_OF(s, m) ((size_t) &((s *)0)->m)
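
/* For example, OP_HOW_MANY(10, 4) evaluates to 3 (three chunks of 4
   elements suffice to hold 10 elements) and OP_ROUND_UP(10, 4) evaluates
   to 12.  A hypothetical use (CHUNK, n and ws are illustrative names, and
   <stdlib.h> is assumed for malloc) is to round a workspace size up to a
   whole number of chunks:

     #define CHUNK 8
     double *ws = (double *)malloc(OP_ROUND_UP(n, CHUNK)*sizeof(double));

   OP_OFFSET_OF(s, m) gives the same result as the standard offsetof()
   macro from <stddef.h>. */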

/* C++ needs to know that types and declarations are C, not C++. */
#ifdef  __cplusplus
# define _OP_BEGIN_DECLS  extern "C" {
# define _OP_END_DECLS    }
#else
# define _OP_BEGIN_DECLS  /* empty */
# define _OP_END_DECLS    /* empty */
#endif
_OP_BEGIN_DECLS

typedef OP_INTEGER op_integer_t;
typedef OP_LOGICAL op_logical_t;

/*---------------------------------------------------------------------------*/
/* LINE SEARCH */

#define OP_TASK_START   0 /* first entry, start search */
#define OP_TASK_FG      1 /* computation of F and G requested */
#define OP_TASK_NEWX    2 /* new improved solution available for inspection */
#define OP_TASK_CONV    3 /* search has converged */
#define OP_TASK_WARN    4 /* search aborted with warning */
#define OP_TASK_ERROR   5 /* search aborted with error */

extern int op_csrch(double f, double g, double *stp_ptr,
		    double ftol, double gtol, double xtol,
		    double stpmin, double stpmax, int *task,
		    char csave[], op_integer_t isave[], double dsave[]);
/*
 * DESCRIPTION:
 *   This  subroutine finds  a step  that satisfies  a  sufficient decrease
 *   condition and a curvature condition.
 *
 *   Each call of the subroutine updates an interval with endpoints STX and
 *   STY. The interval is initially  chosen so that it contains a minimizer
 *   of the modified function:
 *
 *       psi(stp) = f(stp) - f(0) - ftol*stp*g(0)
 *
 *   where g(0) = f'(0).   If psi(stp) <= 0 and g(stp) >=  0 for some step,
 *   then the interval is chosen so that it contains a minimizer of f.  The
 *   algorithm is  designed to  find a step  that satisfies  the sufficient
 *   decrease condition:
 *
 *     f(stp) <= f(0) + ftol*stp*g(0),                            (1)
 *
 *   and the curvature condition:
 *
 *     abs(g(stp)) <= gtol*abs(g(0)).                             (2)
 *
 *   Relations (1) and (2) are called the strong Wolfe conditions.  If FTOL
 *   is less than GTOL and if,  for example, the function is bounded below,
 *   then there  is always a step  which satisfies both  conditions.  If no
 *   step can be  found that satisfies both conditions,  then the algorithm
 *   stops with a warning.  In  this case STP only satisfies the sufficient
 *   decrease condition.
 *
 *
 * ARGUMENTS:
 *   (Note: the user  must not alter  TASK and work arrays ISAVE and DSAVE
 *   between calls.)
 *
 *   F is a double precision variable.  On initial entry, F is the value of
 *     the function  at 0.  On  subsequent entries, F  is the value  of the
 *     function at STP.  On exit, F is left unchanged.
 *
 *   G is  a  double  precision  variable.   On  initial  entry, G  is  the
 *     derivative of  the function at 0.   On subsequent entries,  G is the
 *     derivative of the function at STP.  On exit, G is left unchanged.
 *
 *   STP  is a double  precision variable.   On entry,  STP is  the current
 *     estimate  of a  satisfactory  step.  On  initial  entry, a  positive
 *     initial estimate  must be  provided.  On exit  with TASK=OP_TASK_FG,
 *     STP  is the  new  estimate of  a  satisfactory step.   On exit  with
 *     TASK=OP_TASK_CONV,   STP  is  left   unchanged  and   satisfies  the
 *     sufficient decrease and curvature  condition.  On exit with TASK not
 *     equal to OP_TASK_CONV, STP is left unchanged.
 *
 *   FTOL  is a  double precision  variable.   On entry,  FTOL specifies  a
 *     nonnegative  tolerance for  the sufficient  decrease  condition.  On
 *     exit, FTOL is unchanged.  You should take 0 < FTOL < 0.5.
 *
 *   GTOL  is a  double precision  variable.   On entry,  GTOL specifies  a
 *     nonnegative tolerance for the curvature condition.  On exit, GTOL is
 *     unchanged.  You should take FTOL < GTOL < 1.
 *
 *   XTOL  is a  double precision  variable.   On entry,  XTOL specifies  a
 *     nonnegative  relative   tolerance  for  an   acceptable  step.   The
 *     subroutine exits  with a warning if the  relative difference between
 *     STY and STX is less than XTOL.  On exit, XTOL is unchanged.
 *
 *   STPMIN  is  a  double  precision  variable.  On  entry,  STPMIN  is  a
 *     nonnegative lower bound for the step.  On exit, STPMIN is unchanged.
 *
 *   STPMAX  is  a  double  precision  variable.  On  entry,  STPMAX  is  a
 *     nonnegative upper bound for the step.  On exit, STPMAX is unchanged.
 *
 *   TASK is  an integer variable.  On  initial entry, task must  be set to
 *     OP_TASK_START.  On exit, TASK indicates the required action:
 *
 *       If TASK=OP_TASK_FG  then evaluate  the function and  derivative at
 *         STP and call op_csrch again.
 *
 *       If TASK=OP_TASK_CONV then the search is successful.
 *
 *       If TASK=OP_TASK_WARN  then the subroutine  is not able  to satisfy
 *         the convergence  conditions. The exit value of  stp contains the
 *         best point found during the search.
 *
 *       If  TASK=OP_TASK_ERROR  then  there  is  an  error  in  the  input
 *         arguments.
 *
 *     On exit  with convergence,  a warning or  an error, the  array CSAVE
 *     contains additional information (unless it was NULL).
 *
 *   CSAVE is  a character  work array of,  at least,  OP_MSG_SIZE elements
 *     which is used to store a message corresponding to the value of TASK.
 *
 *   ISAVE is an integer work array of, at least, 2 elements.
 *
 *   DSAVE is a double precision work array of, at least, 12 elements.
 *
 *
 * RETURNED VALUE:
 *   The returned value is less than or equal to zero to signal an error:
 *      0 if STPMAX < STPMIN
 *     -1 if descent condition violated, i.e. DX*(STP - STX) >= 0
 *     -2 if STP outside bracket (STX,STY)
 *     -3 if STPMIN < 0
 *     -4 if XTOL < 0
 *     -5 if FTOL <= 0
 *     -6 if GTOL <= 0
 *     -7 if initial G >= 0
 *     -8 if STP > STPMAX
 *     -9 if STP < STPMIN
 *   The returned value is greater than or equal to 3 to indicate that the
 *   line search cannot converge (warning):
 *      3 if STP = STPMIN
 *      4 if STP = STPMAX
 *      5 if XTOL test satisfied
 *      6 if rounding errors prevent progress
 *   Otherwise (normal return), the returned value is:
 *      1 if caller must evaluate (i.e. TASK = OP_TASK_FG)
 *      2 if line search has converged (i.e. TASK = OP_TASK_CONV)
 *
 *
 * EXAMPLE:
 *   A typical invocation of op_csrch has the following outline:
 *
 *     task = OP_TASK_START;
 *     f = ...;   // function value for STP=0
 *     g = ...;   // derivative value for STP=0
 *     stp = ...; // guess for next STP value to try (STP > 0.0)
 *     for (;;) {
 *       op_csrch(f, g, &stp, ftol, gtol, xtol, stpmin, stpmax, &task,
 *                csave, isave, dsave);
 *       if (task == OP_TASK_FG) {
 *         // Evaluate the function and the derivative at STP.
 *         f = func(stp);
 *         g = grad(stp);
 *       } else if (task == OP_TASK_CONV) {
 *         // Search has converged.
 *         break;
 *       } else if (task == OP_TASK_WARN) {
 *         // Some problem prevents further progress.
 *         fprintf(stderr, "warning in %s\n", csave);
 *         exit(1);
 *       } else {
 *         // An error occurred.
 *         fprintf(stderr, "error in %s\n", csave);
 *         exit(1);
 *       }
 *     }
 *
 *
 * REFERENCES:
 *   [1] Jorge J. Moré and David J. Thuente, "Line search algorithms with
 *       guaranteed   sufficient   decrease"   in   ACM   Transactions   on
 *       Mathematical  Software (TOMS)  Volume 20,  Issue 3,  Pages 286-307
 *       (September 1994).
 *
 *
 * HISTORY:
 *   MINPACK-1 Project. June 1983.
 *   Argonne National Laboratory.
 *   Jorge J. Moré and David J. Thuente.
 *
 *   MINPACK-2 Project. November 1993.
 *   Argonne National Laboratory and University of Minnesota.
 *   Brett M. Averick, Richard G. Carter, and Jorge J. Moré.
 *
 *   Yorick translation and improvements.  October 2001.
 *   C-version.  February 2003.
 *   Observatoire de Lyon (France).
 *   Eric Thiebaut.
 */

extern int op_cstep(double *stx_ptr, double *fx_ptr, double *dx_ptr,
		    double *sty_ptr, double *fy_ptr, double *dy_ptr,
		    double *stp_ptr, double  fp,     double  dp,
		    int *brackt_ptr, double stpmin, double stpmax,
		    char *errmsg);
/*
 * DESCRIPTION:
 *   This function computes a safeguarded step for a line search procedure
 *   and updates an interval that contains a step satisfying a sufficient
 *   decrease condition and a curvature condition [1].
 *
 *   The parameter STX contains the step with the least function value.  If
 *   BRACKT  is set  to true  (i.e.  non-zero)  then a  minimizer  has been
 *   bracketed in  an interval with  endpoints STX and STY.   The parameter
 *   STP contains the current step.   The subroutine assumes that if BRACKT
 *   is true then:
 *
 *     min(STX,STY) < STP < max(STX,STY),
 *
 *   and that  the derivative at  STX is negative  in the direction  of the
 *   step.
 *
 *
 * ARGUMENTS:
 *   STX_PTR, FX_PTR and DX_PTR are  the addresses where the values of STX,
 *     FX  and DX  are  stored.  STX,  FX,  and DX  specify  the step,  the
 *     function, and the derivative at  the best step obtained so far.  The
 *     derivative must be  negative in the direction of  the step, that is,
 *     DX and STP-STX must have opposite signs.  On output these parameters
 *     are updated appropriately.
 *
 *   STY_PTR, FY_PTR and DY_PTR are  the addresses where the values of STY,
 *     FY  and DY  are  stored.  STY,  FY,  and DY  specify  the step,  the
 *     function, and the  derivative at the other endpoint  of the interval
 *     of   uncertainty.    On   output   these  parameters   are   updated
 *     appropriately.
 *
 *   STP_PTR is the address where the value of STP is stored.  STP, FP,
 *     and DP  specify the  step, the function,  and the derivative  at the
 *     current  step.  If  BRACKT is  set true  then on  input STP  must be
 *     between  STX and  STY.  On  output STP  (i.e. the  value  at address
 *     STP_PTR) is set to the new step.
 *
 *
 *   BRACKT_PTR is the address where the value of BRACKT is stored.
 *     BRACKT  is a  logical variable.   On  entry, BRACKT  specifies if  a
 *     minimizer has been bracketed.  Initially BRACKT must be set to false
 *     (i.e  zero).  On  exit, BRACKT  specifies  if a  minimizer has  been
 *     bracketed.  When a minimizer is bracketed, BRACKT (i.e. the value at
 *     address BRACKT_PTR) is set to true (i.e. non-zero).
 *
 *   STPMIN and STPMAX specify lower and upper bounds for the step.
 *
 *   ERRMSG is a character buffer  with at least OP_MSG_SIZE bytes (or NULL
 *     to have  no error  message) used  to store an  error message  if the
 *     routine returns OP_ERROR.
 *
 *
 * RETURNED VALUE:
 *   The returned value is less than or equal to zero to signal an error:
 *      0 if STPMAX < STPMIN
 *     -1 if descent condition violated, i.e. DX*(STP - STX) >= 0
 *     -2 if STP outside bracket (STX,STY)
 *   otherwise (no error) the returned value is 1, 2, 3 or 4 to indicate
 *   how the new step was computed (see the code and ref. [1] for
 *   details).
 *
 *
 * REFERENCES:
 *   [1] Jorge J. Moré and David J. Thuente, "Line search algorithms with
 *       guaranteed   sufficient   decrease"   in   ACM   Transactions   on
 *       Mathematical  Software (TOMS)  Volume 20,  Issue 3,  Pages 286-307
 *       (September 1994).
 *
 *
 * HISTORY:
 *   MINPACK-1 Project. June 1983.
 *   Argonne National Laboratory.
 *   Jorge J. Moré and David J. Thuente.
 *
 *   MINPACK-2 Project. November 1993.
 *   Argonne National Laboratory and University of Minnesota.
 *   Brett M. Averick and Jorge J. Moré.
 *
 *   Yorick translation and improvements.  October 2001.
 *   C-version.  February 2003.
 *   Observatoire de Lyon (France).
 *   Eric Thiebaut.
 */
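
/* A minimal sketch of a single op_cstep update follows; the variable names
 * F0, G0, FP, DP, STPMIN and STPMAX are illustrative (assumed to be
 * provided by the caller), and most applications should rather call
 * op_csrch, which drives op_cstep internally:
 *
 *     char errmsg[OP_MSG_SIZE];
 *     int brackt = 0;                      // no minimizer bracketed yet
 *     double stx = 0.0, fx = f0, dx = g0;  // best step so far
 *     double sty = 0.0, fy = f0, dy = g0;  // other interval endpoint
 *     double stp = ...;                    // current trial step (> 0)
 *     // ... evaluate fp = f(stp) and dp = f'(stp), then:
 *     int info = op_cstep(&stx, &fx, &dx, &sty, &fy, &dy, &stp, fp, dp,
 *                         &brackt, stpmin, stpmax, errmsg);
 *     if (info <= 0) {
 *       // error: a message has been stored in errmsg
 *     } else {
 *       // STP holds the next trial step and BRACKT tells whether a
 *       // minimizer is now bracketed between STX and STY
 *     }
 */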

/*---------------------------------------------------------------------------*/
/* VMLMB - limited memory variable metric method (BFGS)
           with/without bound constraints */

#define OP_VMLMB_CSAVE_NUMBER       OP_MSG_SIZE
#define OP_VMLMB_ISAVE_NUMBER       12
#define OP_VMLMB_DSAVE_NUMBER(n, m) (27 + (n) + 2*(m)*((n) + 1))

extern int op_vmlmb_first(op_integer_t n, op_integer_t m,
			  double fatol, double frtol,
			  double sftol, double sgtol, double sxtol,
			  double epsilon, double costheta,
			  char csave[], op_integer_t isave[], double dsave[]);
extern int op_vmlmb_next(double x[], double *f, double g[],
			 op_logical_t active[], const double h[],
			 char csave[], op_integer_t isave[], double dsave[]);
/* VMLM-B computes  a local  minimizer of  a function of  N variables  by a
 * limited memory variable metric (BFGS) method; optionally, the parameters
 * may be bounded.  The user must evaluate the function and the gradient.
 *
 * VMLM-B   is   implemented   via   two  functions:   op_vmlmb_first   for
 * initialization   and  op_vmlmb_next   for  further   iterations.   These
 * functions use  reverse communication.  The  user must choose  an initial
 * approximation X to the minimizer, evaluate the function and the gradient
 * at X, and make the initial call  with TASK set to "start".  On exit TASK
 * indicates the required action.
 *
 * The arguments are:
 *
 *   N is the number of parameters.
 *
 *   M is  the number of correction  pairs to remember in  order to compute
 *       the  limited memory  variable metric  (BFGS) approximation  of the
 *       inverse of the Hessian.  For large problems, M = 3 to 5 gives good
 *       results.  For small problems, M should be less than or equal to N.
 *       The larger M (and N), the more memory is needed to store the
 *       workspaces (see DSAVE).
 *
 *   FRTOL   is  the  relative   error  desired   in  the   function  (e.g.
 *       FRTOL=1e-8).  Convergence  occurs if the estimate  of the relative
 *       error between F(X)  and F(XSOL), where XSOL is  a local minimizer,
 *       is less than or equal to FRTOL.  FRTOL must have a non-negative floating
 *       point value.
 *
 *   FATOL is the absolute error  desired in the function (e.g. FATOL=0.0).
 *       Convergence occurs  if the estimate of the  absolute error between
 *       F(X) and F(XSOL), where XSOL is a local minimizer, is less than or
 *       equal to FATOL.  FATOL must have a non-negative floating point value.
 *
 *   SFTOL, SGTOL, and SXTOL are  tolerances for the line search subroutine
 *       (see  op_csrch).    Recommended  values:  SFTOL=0.001,  SGTOL=0.9,
 *       SXTOL=0.1 (other values may be more suitable for highly
 *       non-quadratic penalty functions).
 *
 *   EPSILON is a small (strictly positive) value used to discard BFGS updates
 *       that may yield a non positive definite Hessian approximation.
 *
 *   COSTHETA is a small value, in the range [0,1), equal to the cosine of
 *       the maximum allowed angle between the search direction and the
 *       anti-gradient.  The BFGS recursion is restarted whenever the search
 *       direction is not sufficiently "descending".
 *
 *   CSAVE is  a character workspace array  of length OP_VMLMB_CSAVE_NUMBER
 *       (same  as   OP_MSG_SIZE)  which   is  used  to   store  additional
 *       information on exit with convergence, a warning or an error.
 *
 *   ISAVE is an integer workspace array of length OP_VMLMB_ISAVE_NUMBER.
 *
 *   DSAVE is a floating point workspace array of length equal to the value
 *       returned by the macro OP_VMLMB_DSAVE_NUMBER(N, M):
 *           27 + N + 2*M*(N + 1).
 *
 *   X is a  double  precision  array of  length  N.   On  entry,  X is  an
 *       approximation to the solution.   On exit with TASK=OP_TASK_CONV, X
 *       is the current approximation.
 *
 *   F is the  address of a double precision variable.  On  entry, F is the
 *       value of  the function  at X.   On final exit,  F is  the function
 *       value at X.
 *
 *   G is a double  precision array of length N.  On entry,  G is the value
 *       of  the gradient  at X.   On final  exit, G  is the  value  of the
 *       gradient at X.
 *
 *   ACTIVE  is an optional  integer array  with length  N provided  by the
 *       caller if the values in X have bounds.  If the parameters have no
 *       bounds,  ACTIVE  should   be  NULL  (unconstrained  minimization).
 *       Otherwise,  elements  set to  zero  in  ACTIVE  indicate that  the
 *       corresponding values in X have reached a bound and should not be
 *       changed during  the next step  because the gradient has  the wrong
 *       sign (i.e.  the steepest descent direction would violate the bound
 *       constraints):
 *           ACTIVE[i] = 0 if i-th value has a lower bound XLO[i]
 *                           and X[i]=XLO[i] and G[i]>=0 
 *                       0 if i-th value has an upper bound XHI[i]
 *                           and X[i]=XHI[i] and G[i]<=0
 *                       1 (or any non-zero value) otherwise
 *
 *       ACTIVE needs  only to be  computed (and specified) the  first time
 *       op_vmlmb_next is called and  when TASK=OP_TASK_NEWX (i.e.  after a
 *       successful   step).    ACTIVE   may   also   be   specified   when
 *       TASK=OP_TASK_CONV (i.e. after convergence if the caller wishes to
 *       continue with the minimization).  If X has (some) bounds, the caller
 *       is responsible for applying the  bounds to X before evaluating the
 *       function value  F and the gradient G  (i.e. when TASK=OP_TASK_FG),
 *       e.g.:
 *           if (X[i] < XLO[i]) X[i] = XLO[i];
 *           if (X[i] > XHI[i]) X[i] = XHI[i];
 *
 *       If H is  not specified (i.e. H is  NULL) or if H[i] > 0  for all i
 *       such that ACTIVE[i] is non-zero, then ACTIVE is left unchanged.
 *
 *   H is an optional double precision  array with length N provided by the
 *       caller and such that diag(H) is an approximation of the inverse of
 *       the Hessian matrix.  If H is NULL, then the inverse of the Hessian
 *       is approximated by a simple rescaling using Shanno & Phua formula.
 *       Otherwise, if ACTIVE  is NULL, all elements of  H must be strictly
 *       greater than  zero; else  ACTIVE[i] is  set to zero  if H[i]  <= 0
 *       (this is the only case where ACTIVE is modified).  As for ACTIVE,
 *       H needs only to be specified the first time op_vmlmb_next is
 *       called and when TASK=OP_TASK_NEWX.
 *
 *   TASK is  the value returned  by op_vmlmb_first and  op_vmlmb_next.  It
 *       can have one of the following values:
 *           OP_TASK_FG - caller must evaluate the function and gradient at
 *               X and call op_vmlmb_next again.
 *           OP_TASK_NEWX  -   a  new  iterate  has   been  computed.   The
 *               approximation X, function F,  and gradient G are available
 *               for examination.
 *
 *           OP_TASK_CONV  -  the  search  is  successful.   The  solution,
 *               function value and gradient are available in X, F and G.
 *
 *           OP_TASK_WARN - VMLMB is not able to satisfy the convergence
 *               conditions.  The exit value of X contains the best
 *               approximation found so far.  A warning message is
 *               available in CSAVE.
 *           OP_TASK_ERROR - there is an error in the input arguments.  An
 *               error message is available in CSAVE.
 *
 * The caller must  not modify the workspace arrays  CSAVE, ISAVE and DSAVE
 * between calls to op_vmlmb_first and further calls to op_vmlmb_next.
 *
 * A  typical invocation of  VMLMB for  unconstrained minimization  has the
 * following outline:
 *
 *    // Choose a starting vector:
 *    for (i=0 ; i<n ; ++i) x[i] = ... ;
 *  
 *    // Allocate and setup workspaces:
 *    csave = malloc(OP_VMLMB_CSAVE_NUMBER*sizeof(char));
 *    isave = malloc(OP_VMLMB_ISAVE_NUMBER*sizeof(op_integer_t));
 *    dsave = malloc(OP_VMLMB_DSAVE_NUMBER(n, m)*sizeof(double));
 *    task = op_vmlmb_first(n, m, fatol, frtol, sftol, sgtol, sxtol,
 *                          epsilon, costheta, csave, isave, dsave);
 *    for (;;) {
 *      if (task == OP_TASK_FG) {
 *        f = ...;  // evaluate the function at X; store in F
 *        g = ...;  // evaluate the gradient of F at X; store in G
 *      } else if (task == OP_TASK_NEWX) {
 *         // New successful step: the approximation X, function F, and
 *         // gradient G, are available for inspection.
 *      } else {
 *        // Convergence, or error, or warning
 *        fprintf(stderr, "%s\n", csave);
 *        break;
 *      }
 *      // Computes next step:
 *      task = op_vmlmb_next(x, &f, g, NULL, NULL, csave, isave, dsave);
 *    }
 *
 * A typical invocation of VMLMB for bound-constrained minimization has the
 * following outline:
 *
 *    // Choose a starting vector:
 *    for (i=0 ; i<n ; ++i) x[i] = ... ;
 *  
 *    // Allocate and setup workspaces:
 *    csave = malloc(OP_VMLMB_CSAVE_NUMBER*sizeof(char));
 *    isave = malloc(OP_VMLMB_ISAVE_NUMBER*sizeof(op_integer_t));
 *    dsave = malloc(OP_VMLMB_DSAVE_NUMBER(n, m)*sizeof(double));
 *    task = op_vmlmb_first(n, m, fatol, frtol, sftol, sgtol, sxtol,
 *                          epsilon, costheta, csave, isave, dsave);
 *    eval = 0; // number of evaluations
 *    for (;;) {
 *      if (task == OP_TASK_FG) {
 *        op_bounds_apply(n, x, xmin, xmax); // apply bound constraints
 *        f = ...;  // evaluate the function at X; store in F
 *        g = ...;  // evaluate the gradient of F at X; store in G
 *        ++eval;
 *      } else if (task == OP_TASK_NEWX) {
 *         // New successful step: the approximation X, function F, and
 *         // gradient G, are available for inspection.
 *      } else {
 *        // Convergence, or error, or warning
 *        fprintf(stderr, "%s\n", csave);
 *        break;
 *      }
 *      // Computes next step:
 *      if (eval == 1 || task == OP_TASK_NEWX) {
 *        // Computes set of active parameters:
 *        op_bounds_active(n, active, x, g, xmin, xmax);
 *      }
 *      task = op_vmlmb_next(x, &f, g, active, NULL, csave, isave, dsave);
 *    }
 *
 *
 * HISTORY:
 *   MINPACK-2 Project. April 1995.
 *   Argonne National Laboratory and University of Minnesota.
 *   Brett M. Averick, Richard G. Carter, and Jorge J. Moré.
 *
 *   C-version and improvements (bound constraints, preconditioning, ...).
 *   February 2003 - March 2003.
 *   Observatoire de Lyon.
 *   Eric Thiebaut.
 */

extern int op_vmlmb_set_fmin(const char csave[], op_integer_t isave[],
			     double dsave[], double new_value,
			     double *old_value);
extern int op_vmlmb_get_fmin(const char csave[], const op_integer_t isave[],
			     const double dsave[], double *ptr);
/*	The function op_vmlmb_set_fmin sets the value of parameter FMIN in
	the variable metric limited memory (VMLM-B) method to NEW_VALUE.  If
	FMIN was already set and OLD_VALUE is non-NULL, the old value of FMIN
	is stored at that address.  The function returns true if FMIN was
	already set.

	The function op_vmlmb_get_fmin queries the current value of parameter
	FMIN in the variable metric limited memory (VMLM-B) method.  If FMIN
	is set and PTR is non-NULL, the value of FMIN is stored at that
	address.  The function returns true if FMIN is currently set.

	For both functions, CSAVE, ISAVE and DSAVE are the workspace arrays
	used by VMLM-B routines.

	FMIN is a lower bound for the function.  VMLMB exits with a warning if
	F < FMIN.
 */
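
/*	A minimal usage sketch (the lower bound 0.0 below is illustrative and
	CSAVE, ISAVE, DSAVE are the workspace arrays initialized by
	op_vmlmb_first):

	    double old_fmin, fmin;
	    int was_set = op_vmlmb_set_fmin(csave, isave, dsave, 0.0, &old_fmin);
	    // old_fmin is only meaningful if was_set is true
	    if (op_vmlmb_get_fmin(csave, isave, dsave, &fmin)) {
	      // FMIN is set and its value has been copied into fmin
	    }
 */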

extern double op_vmlmb_get_step(const char csave[], const op_integer_t isave[],
				const double dsave[]);
extern double op_vmlmb_get_sftol(const char csave[], const op_integer_t isave[],
				 const double dsave[]);
extern double op_vmlmb_get_sgtol(const char csave[], const op_integer_t isave[],
				 const double dsave[]);
extern double op_vmlmb_get_sxtol(const char csave[], const op_integer_t isave[],
				 const double dsave[]);
extern double op_vmlmb_get_frtol(const char csave[], const op_integer_t isave[],
				 const double dsave[]);
extern double op_vmlmb_get_fatol(const char csave[], const op_integer_t isave[],
				 const double dsave[]);
extern double op_vmlmb_get_epsilon(const char csave[], const op_integer_t isave[],
				   const double dsave[]);
extern double op_vmlmb_get_costheta(const char csave[], const op_integer_t isave[],
				    const double dsave[]);
extern op_integer_t op_vmlmb_get_iter(const char csave[],
				      const op_integer_t isave[],
				      const double dsave[]);
extern op_integer_t op_vmlmb_get_nevals(const char csave[],
					const op_integer_t isave[],
					const double dsave[]);
extern op_integer_t op_vmlmb_get_nrestarts(const char csave[],
					   const op_integer_t isave[],
					   const double dsave[]);
/*	Query the current step size along the search direction, the line
	search and convergence tolerances (SFTOL, SGTOL, SXTOL, FRTOL,
	FATOL), the EPSILON and COSTHETA settings, the iteration number, the
	number of function evaluations, and the number of restarts. */
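
/*	For instance, after a successful step (TASK=OP_TASK_NEWX), a caller
	might print a short progress line from the workspace arrays (assuming
	<stdio.h> is included):

	    fprintf(stderr, "iter=%ld evals=%ld restarts=%ld step=%g\n",
	            (long)op_vmlmb_get_iter(csave, isave, dsave),
	            (long)op_vmlmb_get_nevals(csave, isave, dsave),
	            (long)op_vmlmb_get_nrestarts(csave, isave, dsave),
	            op_vmlmb_get_step(csave, isave, dsave));
 */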

/*---------------------------------------------------------------------------*/
/* APPLY BOUND CONSTRAINTS */

extern void op_bounds_apply(op_integer_t n, double x[],
			    const double xmin[], const double xmax[]);
/*	Apply bound constraints to array X.  Input/output array X has N
	elements, XMIN must have N elements (the lower bounds of X) or be
	NULL (no lower bounds), similarly XMAX must have N elements (the
	upper bounds of X) or be NULL (no upper bounds). */

extern void op_bounds_active(op_integer_t n, op_logical_t active[],
			     const double x[], const double g[],
			     const double xmin[], const double xmax[]);
/*	Set elements of ACTIVE to true or false depending on whether the
	corresponding elements in X belong to the active set of parameters
	or not.  Output array ACTIVE is an N element array.  Input arrays X,
	XMIN and XMAX are the same as in op_bounds_apply.  Input array G is
	the gradient, an N element array.  The active parameters are those
	that satisfy the following condition:

	  (X[i] > XMIN[i] || G[i] < 0) && (X[i] < XMAX[i] || G[i] > 0) */

extern op_integer_t op_bounds_check(op_integer_t n, const double xmin[],
				    const double xmax[]);
/*	Check correctness of bounds XMIN and XMAX (see op_bounds_apply for
	the definition of the arguments).  This function returns -1 if the
	bounds are such that XMIN[i] <= XMAX[i] for all i=0,...,N-1;
	otherwise, the function returns the value i of the first index
	(i >= 0) for which the condition is violated. */
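
/*	For example, the bounds may be validated once before starting the
	minimization (the error handling below is illustrative and assumes
	<stdio.h> and <stdlib.h> are included):

	    op_integer_t i = op_bounds_check(n, xmin, xmax);
	    if (i >= 0) {
	      fprintf(stderr, "invalid bounds at index %ld (XMIN > XMAX)\n",
	              (long)i);
	      exit(1);
	    }
 */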

extern void op_lower_bound_apply(op_integer_t n, double x[], double xmin);
extern void op_lower_bound_active(op_integer_t n, op_logical_t active[],
				  const double x[], const double g[],
				  double xmin);
/*	These routines are similar to op_bounds_apply and op_bounds_active but
	for a scalar lower bound XMIN that is the same for all parameters X. */

extern void op_upper_bound_apply(op_integer_t n, double x[], double xmax);
extern void op_upper_bound_active(op_integer_t n, op_logical_t active[],
				  const double x[], const double g[],
				  double xmax);
/*	These routines are similar to op_bounds_apply and op_bounds_active but
	for a scalar upper bound XMAX that is the same for all parameters X. */

extern void op_interval_apply(op_integer_t n, double x[], double a, double b);
extern void op_interval_active(op_integer_t n, op_logical_t active[],
			       const double x[], const double g[],
			       double a, double b);
/*	These routines are similar to op_bounds_apply and op_bounds_active
	but for a scalar lower bound XMIN=min(A,B) and a scalar upper bound
	XMAX=max(A,B) that are the same for all parameters X. */

/*---------------------------------------------------------------------------*/
/* UTILITIES */

#define OP_MSG_LEN 127
#define OP_MSG_SIZE (OP_MSG_LEN + 1)
extern int op_error(char *buf, const char *errmsg);
/* Copy ERRMSG in BUF and return OP_ERROR.  BUF must have at least
   OP_MSG_SIZE bytes.  At most OP_MSG_SIZE - 1 bytes get copied and BUF is
   guaranteed to be 0-terminated.  */

extern void op_mcopy(const char *msg, char *buf);
/* Copy string MSG into BUF (if non-NULL).  BUF must have at least
   OP_MSG_SIZE bytes.  At most OP_MSG_SIZE - 1 bytes get copied and BUF is
   guaranteed to be 0-terminated.  */

extern double op_dnrm2(op_integer_t n, const double x[]);
/* Returns the Euclidean norm of X: sqrt(X'.X), taking care of overflows. */


extern void op_dcopy(op_integer_t n, const double x[], double y[]);
extern void op_dcopy_active(op_integer_t n, const double x[],
			    double y[], const op_logical_t active[]);
/* Copy elements of X into Y.  Does Y[i] = X[i] for i=0,...,N-1.  If ACTIVE
   is non-NULL, only elements for which ACTIVE[i] is true (non-zero) are
   taken into account. */

extern void op_daxpy(op_integer_t n, double a,
		     const double x[], double y[]);
extern void op_daxpy_active(op_integer_t n, double a,
			    const double x[], double y[],
			    const op_logical_t active[]);
/* Does Y[i] += A*X[i] for i=0,...,N-1.  If ACTIVE is non-NULL, only
   elements for which ACTIVE[i] is true (non-zero) are taken into
   account. */ 

extern double op_ddot(op_integer_t n, const double x[], const double y[]);
extern double op_ddot_active(op_integer_t n, const double x[],
			     const double y[], const op_logical_t active[]);
/* Computes dot product of N-element vectors X and Y. If ACTIVE is
   non-NULL, only elements for which ACTIVE[i] is true (non-zero) are taken
   into account. */

extern void op_dscal(op_integer_t n, double a, double x[]);
/* Scales N-element vector X by scalar A. */ 
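
/* For example, these routines may be combined to form a trial point
   XT = X + ALPHA*D along a search direction D and to test the Euclidean
   norm of the gradient against a caller-chosen threshold GTEST (XT, D,
   ALPHA and GTEST are illustrative names, not OptimPack symbols):

     op_dcopy(n, x, xt);         // xt = x
     op_daxpy(n, alpha, d, xt);  // xt += alpha*d
     if (op_dnrm2(n, g) <= gtest) {
       // the gradient norm is small enough: declare convergence
     }
 */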

/*---------------------------------------------------------------------------*/
/* YORICK-LIKE ROUTINES */

extern int op_anyof(op_integer_t n, const double x[]);
/* Returns true (non-zero) if any element of X is non-zero; returns false
   (zero) otherwise.  N is the number of elements of X. */

extern int op_noneof(op_integer_t n, const double x[]);
/* Returns true (non-zero) if all elements of X are zero; returns false
   (zero) otherwise.  N is the number of elements of X. */

extern int op_allof(op_integer_t n, const double x[]);
/* Returns true (non-zero) if all elements of X are non-zero; returns false
   (zero) otherwise.  N is the number of elements of X. */

/*---------------------------------------------------------------------------*/
_OP_END_DECLS
#endif /* _OPTIMPACK_H */