COPASI API  4.16.103
COptMethodHookeJeeves.cpp
Go to the documentation of this file.
1 // Begin CVS Header
2 // $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/optimization/COptMethodHookeJeeves.cpp,v $
3 // $Revision: 1.18 $
4 // $Name: $
5 // $Author: shoops $
6 // $Date: 2012/06/20 21:16:37 $
7 // End CVS Header
8 
9 // Copyright (C) 2012 - 2010 by Pedro Mendes, Virginia Tech Intellectual
10 // Properties, Inc., University of Heidelberg, and The University
11 // of Manchester.
12 // All rights reserved.
13 
14 // Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
15 // Properties, Inc., EML Research, gGmbH, University of Heidelberg,
16 // and The University of Manchester.
17 // All rights reserved.
18 
19 // Copyright (C) 2001 - 2007 by Pedro Mendes, Virginia Tech Intellectual
20 // Properties, Inc. and EML Research, gGmbH.
21 // All rights reserved.
22 
23 // hoojee.cpp : optimization by the method of Hooke and Jeeves
24 //
25 
26 /* Nonlinear Optimization using the algorithm of Hooke and Jeeves */
27 /* 12 February 1994 author: Mark G. Johnson */
28 
29 /* Adapted for Gepasi by Pedro Mendes, 18 August 1997 */
30 #include <cmath>
31 
32 #include "copasi.h"
33 
34 #include "COptMethodHookeJeeves.h"
35 #include "COptProblem.h"
36 #include "COptItem.h"
37 #include "COptTask.h"
38 
40 
// Default constructor. NOTE(review): this is a Doxygen code-listing scrape;
// the constructor's signature line (presumably
// COptMethodHookeJeeves::COptMethodHookeJeeves(const CCopasiContainer * pParent):)
// was dropped by the extraction.
42  COptMethod(CCopasiTask::optimization, CCopasiMethod::HookeJeeves, pParent)
43 {
 // Register the user-tunable parameters of the Hooke & Jeeves method.
44  addParameter("Iteration Limit", CCopasiParameter::UINT, (unsigned C_INT32) 50);
45  addParameter("Tolerance", CCopasiParameter::DOUBLE, (C_FLOAT64) 1.e-005);
 // NOTE(review): listing line 46 is missing here; initialize() later reads a
 // "Rho" parameter via getValue("Rho"), so an addParameter("Rho", ...) call
 // was presumably dropped by the extraction -- TODO confirm against the
 // original source.
47 
48  initObjects();
49 }
50 
// Copy constructor. NOTE(review): the signature line was dropped by the
// extraction; the cross-reference index at the bottom of this page shows it
// as COptMethodHookeJeeves(const COptMethodHookeJeeves &src,
// const CCopasiContainer *pParent = NULL).
52  const CCopasiContainer * pParent):
53  COptMethod(src, pParent)
54 {
55  initObjects();
56 }
57 
// Destructor (signature line lost in extraction): delegates to cleanup().
59 {cleanup();}
60 
// Main optimization loop -- the Hooke & Jeeves direct-search algorithm
// (appears to be COptMethodHookeJeeves::optimise(); the signature line and
// several hyperlinked lines were dropped by the doc-page extraction -- each
// gap is flagged with a NOTE(review) below).
62 {
63  mContinue = true;
64 
65  if (!initialize())
66  {
67  if (mpCallBack)
 // NOTE(review): listing line 68 (presumably a mpCallBack->finishItem(...)
 // call) was dropped by the extraction.
69 
70  return false;
71  }
72 
73  C_FLOAT64 newf, steplength, tmp;
74  bool Keep;
75  size_t iadj;
76 
77  // initial point is first guess
78  size_t i;
79 
80  // initialise the guess vector
81  for (i = 0; i < mVariableSize; i++)
82  {
83  C_FLOAT64 & mut = mIndividual[i];
84  COptItem & OptItem = *(*mpOptItem)[i];
85 
86  mut = OptItem.getStartValue();
87 
88  // force it to be within the bounds
89  switch (OptItem.checkConstraint(mut))
90  {
91  case - 1:
 // checkConstraint() == -1: below the lower bound -> clamp up.
92  mut = *OptItem.getLowerBoundValue();
93  break;
94 
95  case 1:
 // checkConstraint() == 1: above the upper bound -> clamp down.
96  mut = *OptItem.getUpperBoundValue();
97  break;
98  }
99 
100  // We need to set the value here so that further checks take
101  // account of the value.
102  (*(*mpSetCalculateVariable)[i])(mut);
103  }
104 
 // Evaluate the objective at the starting point.
105  mContinue &= evaluate();
106 
107  // The first value is also the best
 // NOTE(review): listing lines 108-110 are missing; they presumably recorded
 // mEvaluationValue as the initial mBestValue and reported the first
 // solution -- TODO confirm against the original source.
111 
112  if (!mContinue)
113  {
114  if (mpCallBack)
 // NOTE(review): listing line 115 (presumably mpCallBack->finishItem(...))
 // was dropped by the extraction.
116 
117  cleanup();
118  return true;
119  }
120 
 // Seed the search: mNew/mBefore start at the initial point, and each
 // coordinate's step mDelta[i] is proportional (factor mRho) to its
 // magnitude.
121  for (i = 0; i < mVariableSize; i++)
122  {
123  mNew[i] = mBefore[i] = mIndividual[i];
124  mDelta[i] = fabs(mIndividual[i] * mRho);
125 
 // A zero-valued coordinate would give a zero step; fall back to mRho.
126  if (mDelta[i] == 0.0) mDelta[i] = mRho;
127  }
128 
129  iadj = 0;
130  steplength = mRho;
131 
132  newf = mBestValue;
133 
 // Outer loop: run until the iteration limit is hit, the step length shrinks
 // below the tolerance, or the task is cancelled (mContinue == false).
134  while ((mIteration < mIterationLimit) && (steplength > mTolerance) && mContinue)
135  {
136  // signal another iteration to Gepasi
137  if (mpCallBack)
 // NOTE(review): listing line 138 (presumably mContinue &=
 // mpCallBack->progressItem(mhIteration)) was dropped by the extraction.
139 
140  mIteration++;
141  iadj++;
142 
143  /* find best new point, one coord at a time */
144  mNew = mBefore;
145 
146  newf = bestNearby();
147 
148  /* if we made some improvements, pursue that direction */
149  Keep = true;
150 
151  while ((newf < mBestValue) && Keep && mContinue)
152  {
153  // We found a better value
154  mBestValue = newf;
 // NOTE(review): listing lines 155-156 are missing; they presumably pushed
 // the improved solution to the problem (e.g. setSolution(...), cf. the
 // cross-reference index) -- TODO confirm.
157 
158  iadj = 0;
159 
 // Pattern move: step each coordinate further in the direction of the last
 // improvement (mut becomes 2*mut - previous value).
160  for (i = 0; i < mVariableSize; i++)
161  {
162  C_FLOAT64 & mut = mNew[i];
163  COptItem & OptItem = *(*mpOptItem)[i];
164 
165  /* firstly, arrange the sign of mDelta[] */
166  if (mut <= mBefore[i])
167  mDelta[i] = - fabs(mDelta[i]);
168  else
169  mDelta[i] = fabs(mDelta[i]);
170 
171  /* now, move further in this direction */
172  tmp = mBefore[i];
173  mBefore[i] = mut;
174  mut = mut + mut - tmp;
175 
176  // force it to be within the bounds
177  switch (OptItem.checkConstraint(mut))
178  {
179  case - 1:
180  mut = *OptItem.getLowerBoundValue();
181  break;
182 
183  case 1:
184  mut = *OptItem.getUpperBoundValue();
185  break;
186  }
187 
188  // We need to set the value here so that further checks take
189  // account of the value.
190  (*(*mpSetCalculateVariable)[i])(mut);
191  }
192 
193  newf = bestNearby();
194 
195  /* if the further (optimistic) move was bad.... */
196  if (newf >= mBestValue) break;
197 
198  /* make sure that the differences between the new */
199  /* and the old points are due to actual */
200  /* displacements; beware of roundoff errors that */
201  /* might cause newf < mBestValue */
202  Keep = false;
203 
204  for (i = 0; i < mVariableSize; i++)
205  if (fabs(mNew[i] - mBefore[i]) > (0.5 * fabs(mDelta[i])))
206  {
207  Keep = true;
208  break;
209  }
210  }
211 
 // No improvement at the current scale: contract the step length and all
 // per-coordinate deltas by the factor mRho.
212  if ((steplength >= mTolerance) && (newf >= mBestValue))
213  {
214  steplength = steplength * mRho;
215 
216  for (i = 0; i < mVariableSize; i++)
217  mDelta[i] *= mRho;
218  }
219  }
220 
221  if (mpCallBack)
 // NOTE(review): listing line 222 (presumably mpCallBack->finishItem(...))
 // was dropped by the extraction.
223 
224  cleanup();
225  return true;
226 }
227 
// Appears to be initObjects(); its body (listing line 230, presumably an
// addObjectReference(...) call -- cf. the cross-reference index) was dropped
// by the extraction -- TODO confirm against the original source.
229 {
231 }
232 
// One-time setup before a run (appears to be
// COptMethodHookeJeeves::initialize(); signature line lost in extraction):
// reads the method parameters, registers the progress item, and resets state.
234 {
235  cleanup();
236 
237  if (!COptMethod::initialize()) return false;
238 
 // Cache the user-supplied method parameters.
239  mIterationLimit = * getValue("Iteration Limit").pUINT;
240  mTolerance = * getValue("Tolerance").pDOUBLE;
241  mRho = * getValue("Rho").pDOUBLE;
242 
243  mIteration = 0;
244 
 // Register the iteration counter with the progress report, if any.
245  if (mpCallBack)
246  mhIteration =
247  mpCallBack->addItem("Current Iteration",
248  mIteration,
249  & mIterationLimit);
250 
251  mVariableSize = mpOptItem->size();
252 
 // NOTE(review): listing lines 253-256 are missing; they presumably resized
 // the working vectors (mIndividual, mBefore, mNew, mDelta) to mVariableSize
 // -- TODO confirm against the original source.
257 
 // No solution yet: start from +infinity so any finite evaluation improves
 // on it.
258  mBestValue = std::numeric_limits<C_FLOAT64>::infinity();
259 
260  return true;
261 }
262 
// Appears to be cleanup() (signature line lost in extraction): nothing to
// release, always reports success.
264 {
265  return true;
266 }
267 
// Evaluates the objective function at the current candidate point (appears
// to be COptMethodHookeJeeves::evaluate(); the signature line and several
// hyperlinked lines were dropped by the extraction -- see NOTE(review)
// markers). Returns mContinue so callers can abort on cancellation.
269 {
270  // We do not need to check whether the parametric constraints are fulfilled
271  // since the parameters are created within the bounds.
272 
273  // evaluate the fitness
 // NOTE(review): listing line 274 (the guard condition, presumably testing
 // the result of mpOptProblem->calculate()) was dropped by the extraction.
275  {
 // NOTE(review): listing line 276 is missing (presumably sets
 // mEvaluationValue to infinity when the calculation fails) -- TODO confirm.
277  return mContinue;
278  }
279 
 // NOTE(review): listing line 280 is missing.
281 
282  // check whether the functional constraints are fulfilled
 // NOTE(review): listing line 283 (the if-condition, presumably
 // !mpOptProblem->checkFunctionalConstraints()) is missing.
284  mEvaluationValue = std::numeric_limits<C_FLOAT64>::infinity();
285  else
286  // get the value of the objective function
 // NOTE(review): listing line 287 (presumably mEvaluationValue =
 // mpOptProblem->getCalculateValue()) is missing.
288 
289  return mContinue;
290 }
291 
292 // given a point, look for a better one nearby, one coord at a time
// Exploratory move (appears to be COptMethodHookeJeeves::bestNearby();
// signature line lost in extraction): probes +mDelta[i] then -mDelta[i] on
// each coordinate in turn, keeping any step that lowers the objective, and
// returns the best value found (minf).
294 {
295  C_FLOAT64 minf = mBestValue;
296  size_t i;
297 
298  mIndividual = mNew;
299 
 // Push the starting point into the model before probing.
300  for (i = 0; i < mVariableSize; i++)
301  (*(*mpSetCalculateVariable)[i])(mIndividual[i]);
302 
303  for (i = 0; i < mVariableSize; i++)
304  {
305  C_FLOAT64 & mut = mIndividual[i];
306  COptItem & OptItem = *(*mpOptItem)[i];
 // Try a step of +mDelta[i] on this coordinate.
307  mut = mNew[i] + mDelta[i];
308 
309  // force it to be within the bounds
310  switch (OptItem.checkConstraint(mut))
311  {
312  case - 1:
313  mut = *OptItem.getLowerBoundValue();
314  break;
315 
316  case 1:
317  mut = *OptItem.getUpperBoundValue();
318  break;
319  }
320 
321  // We need to set the value here so that further checks take
322  // account of the value.
323  (*(*mpSetCalculateVariable)[i])(mut);
324 
 // Abort the scan if the evaluation was cancelled.
325  if (!evaluate()) break;
326 
327  if (mEvaluationValue < minf)
328  minf = mEvaluationValue;
329  else
330  {
 // The positive step did not improve: reverse the delta and try the
 // opposite direction.
331  mDelta[i] = - mDelta[i];
332  mut = mNew[i] + mDelta[i];
333 
334  // force it to be within the bounds
335  switch (OptItem.checkConstraint(mut))
336  {
337  case - 1:
338  mut = *OptItem.getLowerBoundValue();
339  break;
340 
341  case 1:
342  mut = *OptItem.getUpperBoundValue();
343  break;
344  }
345 
346  // We need to set the value here so that further checks take
347  // account of the value.
348  (*(*mpSetCalculateVariable)[i])(mut);
349 
350  if (!evaluate()) break;
351 
352  if (mEvaluationValue < minf)
353  minf = mEvaluationValue;
354  else
355  {
 // Neither direction helped: restore the coordinate to its old value.
356  mut = mNew[i];
357  (*(*mpSetCalculateVariable)[i])(mut);
358  }
359  }
360  }
361 
 // Record the (possibly improved) point and return the best value found.
362  mNew = mIndividual;
363 
364  return(minf);
365 }
366 
367 /* Find a point X where the nonlinear function f(X) has a local */
368 /* minimum. X is an n-vector and f(X) is a scalar. In mathe- */
369 /* matical notation f: R^n -> R^1. The objective function f() */
370 /* is not required to be continuous. Nor does f() need to be */
371 /* differentiable. The program does not use or require */
372 /* derivatives of f(). */
373 
374 /* The software user supplies three things: a subroutine that */
375 /* computes f(X), an initial "starting guess" of the minimum point */
376 /* X, and values for the algorithm convergence parameters. Then */
377 /* the program searches for a local minimum, beginning from the */
378 /* starting guess, using the Direct Search algorithm of Hooke and */
379 /* Jeeves. */
380 
381 /* This C program is adapted from the Algol pseudocode found in */
382 /* "Algorithm 178: Direct Search" by Arthur F. Kaupe Jr., Commun- */
383 /* ications of the ACM, Vol 6. p.313 (June 1963). It includes the */
384 /* improvements suggested by Bell and Pike (CACM v.9, p. 684, Sept */
385 /* 1966) and those of Tomlin and Smith, "Remark on Algorithm 178" */
386 /* (CACM v.12). The original paper, which I don't recommend as */
387 /* highly as the one by A. Kaupe, is: R. Hooke and T. A. Jeeves, */
388 /* "Direct Search Solution of Numerical and Statistical Problems", */
389 /* Journal of the ACM, Vol. 8, April 1961, pp. 212-229. */
390 
391 /* Calling sequence: */
392 /* int hooke(mVariableSize, startpt, endpt, mRho, epsilon, mIterationLimit) */
393 /* */
394 /* mVariableSize {an integer} This is the number of dimensions */
395 /* in the domain of f(). It is the number of */
396 /* coordinates of the starting point (and the */
397 /* minimum point.) */
398 /* startpt {an array of C_FLOAT64s} This is the user- */
399 /* supplied guess at the minimum. */
400 /* endpt {an array of C_FLOAT64s} This is the location of */
401 /* the local minimum, calculated by the program */
402 /* mRho {a C_FLOAT64} This is a user-supplied convergence */
403 /* parameter (more detail below), which should be */
404 /* set to a value between 0.0 and 1.0. Larger */
405 /* values of mRho give greater probability of */
406 /* convergence on highly nonlinear functions, at a */
407 /* cost of more function evaluations. Smaller */
408 /* values of mRho reduce the number of evaluations */
409 /* (and the program running time), but increases */
410 /* the risk of nonconvergence. See below. */
411 /* epsilon {a C_FLOAT64} This is the criterion for halting */
412 /* the search for a minimum. When the algorithm */
413 /* begins to make less and less progress on each */
414 /* iteration, it checks the halting criterion: if */
415 /* the stepsize is below epsilon, terminate the */
416 /* iteration and return the current best estimate */
417 /* of the minimum. Larger values of epsilon (such */
418 /* as 1.0e-4) give quicker running time, but a */
419 /* less accurate estimate of the minimum. Smaller */
420 /* values of epsilon (such as 1.0e-7) give longer */
421 /* running time, but a more accurate estimate of */
422 /* the minimum. */
423 /* mIterationLimit {an integer} A second, rarely used, halting */
424 /* criterion. If the algorithm uses >= mIterationLimit */
425 /* iterations, halt. */
426 
427 /* The user-supplied objective function f(x,n) should return a C */
428 /* "C_FLOAT64". Its arguments are x -- an array of C_FLOAT64s, and */
429 /* n -- an integer. x is the point at which f(x) should be */
430 /* evaluated, and n is the number of coordinates of x. That is, */
431 /* n is the number of coefficients being fitted. */
432 
433 /* mRho, the algorithm convergence control */
434 /* The algorithm works by taking "steps" from one estimate of */
435 /* a minimum, to another (hopefully better) estimate. Taking */
436 /* big steps gets to the minimum more quickly, at the risk of */
437 /* "stepping right over" an excellent point. The stepsize is */
438 /* controlled by a user supplied parameter called mRho. At each */
439 /* iteration, the stepsize is multiplied by mRho (0 < mRho < 1), */
440 /* so the stepsize is successively reduced. */
441 /* Small values of mRho correspond to big stepsize changes, */
442 /* which make the algorithm run more quickly. However, there */
443 /* is a chance (especially with highly nonlinear functions) */
444 /* that these big changes will accidentally overlook a */
445 /* promising search vector, leading to nonconvergence. */
446 /* Large values of mRho correspond to small stepsize changes, */
447 /* which force the algorithm to carefully examine nearby points */
448 /* instead of optimistically forging ahead. This improves the */
449 /* probability of convergence. */
450 /* The stepsize is reduced until it is equal to (or smaller */
451 /* than) epsilon. So the number of iterations performed by */
452 /* Hooke-Jeeves is determined by mRho and epsilon: */
453 /* mRho**(number_of_iterations) = epsilon */
454 /* In general it is a good idea to set mRho to an aggressively */
455 /* small value like 0.5 (hoping for fast convergence). Then, */
456 /* if the user suspects that the reported minimum is incorrect */
457 /* (or perhaps not accurate enough), the program can be run */
458 /* again with a larger value of mRho such as 0.85, using the */
459 /* result of the first minimization as the starting guess to */
460 /* begin the second minimization. */
461 
462 /* Normal use: (1) Code your function f() in the C language */
463 /* (2) Install your starting guess {or read it in} */
464 /* (3) Run the program */
465 /* (4) {for the skeptical}: Use the computed minimum */
466 /* as the starting point for another run */
467 
468 /* Data Fitting: */
469 /* Code your function f() to be the sum of the squares of the */
470 /* errors (differences) between the computed values and the */
471 /* measured values. Then minimize f() using Hooke-Jeeves. */
472 /* EXAMPLE: you have 20 datapoints (ti, yi) and you want to */
473 /* find A,B,C such that (A*t*t) + (B*exp(t)) + (C*tan(t)) */
474 /* fits the data as closely as possible. Then f() is just */
475 /* f(x) = SUM (measured_y[i] - ((A*t[i]*t[i]) + (B*exp(t[i])) */
476 /* + (C*tan(t[i]))))^2 */
477 /* where x[] is a 3-vector consisting of {A, B, C}. */
478 
479 /* */
480 /* The author of this software is M.G. Johnson. */
481 /* Permission to use, copy, modify, and distribute this software */
482 /* for any purpose without fee is hereby granted, provided that */
483 /* this entire notice is included in all copies of any software */
484 /* which is or includes a copy or modification of this software */
485 /* and in all copies of the supporting documentation for such */
486 /* software. THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT */
487 /* ANY EXPRESS OR IMPLIED WARRANTY. IN PARTICULAR, NEITHER THE */
488 /* AUTHOR NOR AT&T MAKE ANY REPRESENTATION OR WARRANTY OF ANY */
489 /* KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS */
490 /* FITNESS FOR ANY PARTICULAR PURPOSE. */
491 /* */
virtual C_INT32 checkConstraint() const
Definition: COptItem.cpp:401
COptTask * mpParentTask
Definition: COptMethod.h:58
virtual bool initialize()
Definition: COptMethod.cpp:189
unsigned C_INT32 mIterationLimit
void resize(size_t size, const bool &copy=false)
Definition: CVector.h:301
COptProblem * mpOptProblem
Definition: COptMethod.h:56
virtual void output(const COutputInterface::Activity &activity)
#define C_INT32
Definition: copasi.h:90
virtual bool progressItem(const size_t &handle)
COptMethodHookeJeeves(const COptMethodHookeJeeves &src, const CCopasiContainer *pParent=NULL)
CVector< C_FLOAT64 > mBefore
virtual bool calculate()
size_t addItem(const std::string &name, const std::string &value, const std::string *pEndValue=NULL)
const std::vector< UpdateMethod * > * mpSetCalculateVariable
Definition: COptMethod.h:65
const Value & getValue() const
const C_FLOAT64 * getLowerBoundValue() const
Definition: COptItem.h:191
unsigned C_INT32 * pUINT
virtual bool setSolution(const C_FLOAT64 &value, const CVector< C_FLOAT64 > &variables)
virtual bool finishItem(const size_t &handle)
virtual bool checkFunctionalConstraints()
CVector< C_FLOAT64 > mIndividual
const C_FLOAT64 & getStartValue() const
Definition: COptItem.cpp:199
const std::vector< COptItem * > * mpOptItem
Definition: COptMethod.h:70
#define C_FLOAT64
Definition: copasi.h:92
const C_FLOAT64 * getUpperBoundValue() const
Definition: COptItem.h:198
bool addParameter(const CCopasiParameter &parameter)
virtual bool checkParametricConstraints()
CVector< C_FLOAT64 > mNew
const C_FLOAT64 & getCalculateValue() const
CVector< C_FLOAT64 > mDelta
CCopasiObject * addObjectReference(const std::string &name, CType &reference, const unsigned C_INT32 &flag=0)
CProcessReport * mpCallBack
#define max(a, b)
Definition: f2c.h:176