summaryrefslogtreecommitdiffstats
path: root/scilab_doc
diff options
context:
space:
mode:
authorMichaël Baudin <michael.baudin@scilab.org>2009-10-07 10:57:06 +0200
committerMichaël Baudin <michael.baudin@scilab.org>2009-10-07 10:57:06 +0200
commit34b4f308f990c6cee592a0dea01006da6d88f23d (patch)
treef0c6afc97b4fb16b03582eaa3fb40a5a5b82e356 /scilab_doc
parenteae647221bf216f40641191803e83b4690823ff7 (diff)
downloadscilab-34b4f308f990c6cee592a0dea01006da6d88f23d.zip
scilab-34b4f308f990c6cee592a0dea01006da6d88f23d.tar.gz
Added bibliography references
Diffstat (limited to 'scilab_doc')
-rw-r--r--scilab_doc/neldermead/neldermead-bibliography-so.pdfbin0 -> 147878 bytes
-rw-r--r--scilab_doc/neldermead/neldermead.bib54
-rw-r--r--scilab_doc/neldermead/nmbibliography.tex134
3 files changed, 158 insertions, 30 deletions
diff --git a/scilab_doc/neldermead/neldermead-bibliography-so.pdf b/scilab_doc/neldermead/neldermead-bibliography-so.pdf
new file mode 100644
index 0000000..33e0fdb
--- /dev/null
+++ b/scilab_doc/neldermead/neldermead-bibliography-so.pdf
Binary files differ
diff --git a/scilab_doc/neldermead/neldermead.bib b/scilab_doc/neldermead/neldermead.bib
index 6f0ed09..3d6f69b 100644
--- a/scilab_doc/neldermead/neldermead.bib
+++ b/scilab_doc/neldermead/neldermead.bib
@@ -9,7 +9,6 @@ volume = {9},
9number = {1}, 9number = {1},
10pages = {112-147}, 10pages = {112-147},
11keywords = {direct search methods; Nelder--Mead simplex methods; nonderivative optimization}, 11keywords = {direct search methods; Nelder--Mead simplex methods; nonderivative optimization},
12note = {\url{http://link.aip.org/link/?SJE/9/112/1}},
13doi = {10.1137/S1052623496303470} 12doi = {10.1137/S1052623496303470}
14} 13}
15 14
@@ -19,7 +18,6 @@ doi = {10.1137/S1052623496303470}
19 abstract = {A method is described for the minimization of a function of n variables, which depends on the comparison of function values at the (n + 1) vertices of a general simplex, followed by the replacement of the vertex with the highest value by another point. The simplex adapts itself to the local landscape, and contracts on to the final minimum. The method is shown to be effective and computationally compact. A procedure is given for the estimation of the Hessian matrix in the neighbourhood of the minimum, needed in statistical estimation problems. 10.1093/comjnl/7.4.308}, 18 abstract = {A method is described for the minimization of a function of n variables, which depends on the comparison of function values at the (n + 1) vertices of a general simplex, followed by the replacement of the vertex with the highest value by another point. The simplex adapts itself to the local landscape, and contracts on to the final minimum. The method is shown to be effective and computationally compact. A procedure is given for the estimation of the Hessian matrix in the neighbourhood of the minimum, needed in statistical estimation problems. 10.1093/comjnl/7.4.308},
20 author = {Nelder, J. A. and Mead, R. }, 19 author = {Nelder, J. A. and Mead, R. },
21 citeulike-article-id = {3009487}, 20 citeulike-article-id = {3009487},
22 doi = {http://dx.doi.org/10.1093/comjnl/7.4.308},
23 journal = {The Computer Journal}, 21 journal = {The Computer Journal},
24 month = {January}, 22 month = {January},
25 number = {4}, 23 number = {4},
@@ -40,7 +38,6 @@ pages = { ix, 227 p. : },
40year = { 1979 }, 38year = { 1979 },
41type = { Book }, 39type = { Book },
42subjects = { Numerical analysis -- Data processing }, 40subjects = { Numerical analysis -- Data processing },
43catalogue-url = { http://nla.gov.au/nla.cat-vn1060620 },
44} 41}
45 42
46@Book{Kelley1999, 43@Book{Kelley1999,
@@ -49,14 +46,12 @@ author = {C. T. Kelley},
49publisher = {SIAM Frontiers in Applied Mathematics}, 46publisher = {SIAM Frontiers in Applied Mathematics},
50volume={19}, 47volume={19},
51year={1999}, 48year={1999},
52note={\url{http://www.siam.org/books/textbooks/fr18_book.pdf}}
53} 49}
54 50
55@Book{KelleyMethodsOptimizationMatlabCodes, 51@Book{KelleyMethodsOptimizationMatlabCodes,
56title={Iterative Methods for Optimization: Matlab Codes}, 52title={Iterative Methods for Optimization: Matlab Codes},
57author = {C. T. Kelley}, 53author = {C. T. Kelley},
58publisher = {North Carolina State University}, 54publisher = {North Carolina State University},
59note={\url{http://www4.ncsu.edu/~ctk/matlab_darts.html}}
60} 55}
61 56
62 @article{Spendley1962, 57 @article{Spendley1962,
@@ -69,30 +64,13 @@ note={\url{http://www4.ncsu.edu/~ctk/matlab_darts.html}}
69 number = {4}, 64 number = {4},
70 jstor_formatteddate = {Nov., 1962}, 65 jstor_formatteddate = {Nov., 1962},
71 pages = {441--461}, 66 pages = {441--461},
72 url = {http://www.jstor.org/stable/1266283},
73 ISSN = {00401706}, 67 ISSN = {00401706},
74 abstract = {A technique for empirical optimisation is presented in which a sequence of experimental designs each in the form of a regular or irregular simplex is used, each simplex having all vertices but one in common with the preceding simplex, and being completed by one new point. Reasons for the choice of design are outlined, and a formal procedure given. The performance of the technique in the presence and absence of error is studied and it is shown (a) that in the presence of error the rate of advance is inversely proportional to the error standard deviation, so that replication of observations is not beneficial, and (b) that the "efficiency" of the technique appears to increase in direct proportion to the number of factors investigated. It is also noted that, since the direction of movement from each simplex is dependent solely on the ranking of the observations, the technique may be used even in circumstances when a response cannot be quantitatively assessed. Attention is drawn to the ease with which second-order designs having the minimum number of experimental points may be derived from a regular simplex, and a fitting procedure which avoids a direct matrix inversion is suggested. In a brief appendix one or two new rotatable designs derivable from a simplex are noted.}, 68 abstract = {A technique for empirical optimisation is presented in which a sequence of experimental designs each in the form of a regular or irregular simplex is used, each simplex having all vertices but one in common with the preceding simplex, and being completed by one new point. Reasons for the choice of design are outlined, and a formal procedure given. 
The performance of the technique in the presence and absence of error is studied and it is shown (a) that in the presence of error the rate of advance is inversely proportional to the error standard deviation, so that replication of observations is not beneficial, and (b) that the "efficiency" of the technique appears to increase in direct proportion to the number of factors investigated. It is also noted that, since the direction of movement from each simplex is dependent solely on the ranking of the observations, the technique may be used even in circumstances when a response cannot be quantitatively assessed. Attention is drawn to the ease with which second-order designs having the minimum number of experimental points may be derived from a regular simplex, and a fitting procedure which avoids a direct matrix inversion is suggested. In a brief appendix one or two new rotatable designs derivable from a simplex are noted.},
75 publisher = {American Statistical Association and American Society for Quality}, 69 publisher = {American Statistical Association and American Society for Quality},
76 language = {}, 70 language = {},
77 copyright = {Copyright © 1962 American Statistical Association and American Society for Quality}, 71 copyright = {Copyright 1962 American Statistical Association and American Society for Quality},
78 year = {1962}, 72 year = {1962},
79 } 73 }
80@book{citeulike:2815920,
81 abstract = {The only book on the market devoted to sequential simplex optimization This book presents an easy-to-learn, effective optimization technique that can be applied immediately to many problems in the real world. The sequential simplex is an evolutionary operation (EVOP) technique that uses experimental results- it does not require a mathematical model. The authors present their subject with a level of detail and clarity that is refreshingly welcome in a technical text. The basics are presented first, followed by a detailed discussion of the fine points needed to get the most out of this optimization technique. Worksheets are provided and their use is illustrated with step-by-step worked examples. This makes the logic and calculations of the simplex algorithms easy to understand and follow. The text also provides more than 200 figures and over 500 references to sequential simplex applications, which allows rapid access to specific examples of the use of the technique in a wide range of applications. Sequential Simplex Optimization: A Technique for Improving Quality and Productivity in Research, Development, and Manufacturing is essential for any student or professional who desires to learn this innovative technique quickly and easily.},
82 author = {Walters, Fred H. and Jr, Lloyd R. and Morgan, Stephen L. and Deming, Stanley N. },
83 citeulike-article-id = {2815920},
84 howpublished = {Hardcover},
85 isbn = {0849358949},
86 keywords = {multiobjective, optimization, simplex},
87 month = {August},
88 posted-at = {2008-10-23 13:13:43},
89 priority = {2},
90 publisher = {CRC},
91 series = {Chemometrics series},
92 title = {Sequential Simplex Optimization: A Technique for Improving Quality and Productivity in Research, Development, and Manufacturing},
93 url = {http://www.amazon.ca/exec/obidos/redirect?tag=citeulike09-20\&amp;path=ASIN/0849358949},
94 year = {1991}
95}
96@book{NumericalRecipes, 74@book{NumericalRecipes,
97author = {W. H. Press and Saul A. Teukolsky and William T. Vetterling and Brian P. Flannery}, 75author = {W. H. Press and Saul A. Teukolsky and William T. Vetterling and Brian P. Flannery},
98title = {Numerical Recipes in C, Second Edition}, 76title = {Numerical Recipes in C, Second Edition},
@@ -253,8 +231,7 @@ publisher= {}}
253 title = {Multiobjective Optimization in Engineering Design: Application to Fluid Power Systems}, 231 title = {Multiobjective Optimization in Engineering Design: Application to Fluid Power Systems},
254 institution = {Department of Mechanical Engineering, Link\"oping University}, 232 institution = {Department of Mechanical Engineering, Link\"oping University},
255 year = {2001}, 233 year = {2001},
256 note={\url{https://polopoly.liu.se/content/1/c6/10/99/74/phdthesis.pdf}, 234 note={\url{https://polopoly.liu.se/content/1/c6/10/99/74/phdthesis.pdf}}
257 \url{http://www.iei.liu.se/machine/johan-olvander}}
258} 235}
259 236
260@article{Box1965, 237@article{Box1965,
@@ -487,3 +464,30 @@ PAGES = {2928}
487 pages = {396--401} 464 pages = {396--401}
488} 465}
489 466
467@book{WaltersParkerMorganDeming1991,
468 author = {Walters, F. S. and Parker, Jr., L. R. and Morgan, S. L. and Deming, S. N.},
469 title = {Sequential Simplex Optimization for Quality and Productivity in Research, Development, and Manufacturing},
470 year = {1991},
471 publisher= {CRC Press, Boca Raton, FL},
472 series = {Chemometrics series},
473}
474
475@ARTICLE{VanDerWiel1980,
476 AUTHOR = {Van Der Wiel, P.F.A. },
477 TITLE = {Improvement of the super modified simplex optimisation procedure},
478 YEAR = {1980},
479 JOURNAL = {Analytica Chimica Acta},
480 VOLUME = {122},
481 PAGES = {421--433},
482}
483
484@ARTICLE{Routh1977,
485 author = {Routh, M. W. and Swartz, P.A. and Denton, M.B.},
486 title = {Performance of the Super Modified Simplex},
487 journal = {Analytical Chemistry},
488 year = {1977},
489 volume = {49},
490 number = {9},
491 pages = {1422--1428}
492}
493
diff --git a/scilab_doc/neldermead/nmbibliography.tex b/scilab_doc/neldermead/nmbibliography.tex
index 6b525c0..8aee50c 100644
--- a/scilab_doc/neldermead/nmbibliography.tex
+++ b/scilab_doc/neldermead/nmbibliography.tex
@@ -1,7 +1,7 @@
1\chapter{Nelder-Mead bibliography} 1\chapter{Nelder-Mead bibliography}
2 2
3In this section, we present a brief overview of selected 3In this section, we present a brief overview of selected
4papers, sorted in chronological order, which deals with 4papers, sorted in chronological order, which deal with
5the Nelder-Mead algorithm 5the Nelder-Mead algorithm
6 6
7\section{Spendley, Hext, Himsworth, 1962} 7\section{Spendley, Hext, Himsworth, 1962}
@@ -98,7 +98,7 @@ The initial simplex is computed axis-by-axis, given the initial guess
98and a vector of step lengths. 98and a vector of step lengths.
99A factorial test is used to check if the computed optimum point is a local minimum. 99A factorial test is used to check if the computed optimum point is a local minimum.
100 100
101\subsection{Parkinson and Hutchinson, 1972} 101\section{Parkinson and Hutchinson, 1972}
102 102
103In \cite{parkinson1972}, "An investigation into the efficiency of variants on the simplex method", 103In \cite{parkinson1972}, "An investigation into the efficiency of variants on the simplex method",
104Parkinson and Hutchinson explored 104Parkinson and Hutchinson explored
@@ -145,6 +145,131 @@ complex points, and every point on the line segment joining these two points
145all have functional values lower than the functional values at each of the 145all have functional values lower than the functional values at each of the
146remaining complex points. 146remaining complex points.
147 147
148\section{Routh, Swartz, Denton, 1977}
149
150"Performance of the Super-Modified Simplex",
151M.W. Routh, P.A. Swartz, M.B. Denton,
152Analytical Chemistry,
1531977
154
155In this article \cite{Routh1977},
156Routh, Swartz and Denton present a variant
157of the Nelder-Mead algorithm, which is called
158the Super Modified Simplex (SMS) method in their
159paper. The algorithm is modified in the following
160way. After determination of the worst response (W),
161the responses at the centroid (C) and reflected (R)
162vertices are measured and a second-order polynomial
163curve is fitted to the responses at W, C and R.
164Furthermore, the curve is extrapolated beyond W
165and R by a percentage of the W-R vector resulting
166in two types of curve shapes. In the concave down
167case, a maximum occurs within the interval.
168Assuming a maximization process, evaluation of the
169derivative of the curve reveals the location of the
170predicted optimum whose response is subsequently
171evaluated, the new vertex is located at that position,
172and the optimization process is continued.
173In the concave up case, a response maximum
174does not occur within the interval so the extended
175interval boundary producing the highest predicted
176response is chosen as the new vertex location,
177its response is determined, and the optimization
178is continued. If the response at the predicted
179extended interval boundary location does not
180prove to be greater than the response at R, the
181vertex R may instead be retained as the new
182vertex and the process continued.
183The slope at the extended interval boundary
184may additionally be evaluated dictating the
185magnitude of the expansion coefficient, i.e.
186the greater the slope (indicating rapid approach
187to the optimum location), the smaller the required
188expansion coefficient and, conversely, the smaller
189the slope (indicating remoteness from the
190optimum location), the larger the required expansion
191coefficient.
192
193Some additional safeguard procedure must be used
194in order to prevent the collapse of the simplex.
195
196\section{Van Der Wiel, 1980}
197
198"Improvement of the Super-Modified Simplex
199Optimization Procedure",
200P.F.A. Van Der Wiel,
201Analytica Chimica Acta,
2021980
203
204In this article \cite{VanDerWiel1980}, Van Der Wiel
205tries to improve the SMS method by Routh et al.
206His modifications are based on a Gaussian fit,
207weighted reflection point and estimation of response
208at the reflection point. Van Der Wiel presents
209a simplified pseudo-code for one algorithm.
210The method is tested in 5 cases, where the cost
211function depends on the exponential function.
212
213\section{Walters, Parker, Morgan and Deming, 1991}
214
215"Sequential Simplex Optimization for Quality and Productivity in Research, Development, and Manufacturing",
216F. S. Walters, L. R. Parker, Jr., S. L. Morgan, and S. N. Deming, 1991
217
218In this book \cite{WaltersParkerMorganDeming1991}, Walters, Parker, Morgan and Deming
219give a broad view on the simplex methods in chemistry.
220The Spendley et al. and Nelder-Mead algorithms are particularly deeply analyzed,
221with many experiments analyzed in great detail.
222Template tables are given, so that an engineer can manually perform the optimization
223and make the necessary calculations.
224Practical advice is given, which allows better use of the algorithms.
225
226In chapter 5, "Comments on Fixed-size and Variable-size Simplexes",
227comparing the path of the two algorithms allows one to check that a real optimum
228has been found.
229When the authors analyze the graph produced by the response depending on the number of
230iterations, the general behavior of the fixed-size algorithm is made of four steps.
231Gains in response are initially rapid, but the rate of return decreases as the
232simplex probes to find the ridge and then moves along the shallower
233ridge to find the optimum.
234The behavior from different starting locations is also analyzed.
235Varying the size of the initial simplex is also analyzed for the fixed-size
236simplex algorithm. The large number of iterations produced when a tiny
237initial simplex is used with the fixed-size simplex is emphasized.
238
239Chapter 6, "General Considerations", warns that the user may
240set up a degenerate initial simplex, leading to a false convergence
241of the algorithm. Various other initial simplices are analyzed.
242Modifications in the algorithm to take bound
243constraints into account are presented.
244The behavior of the fixed-size and variable-size simplex algorithms
245is analyzed when the simplex converges.
246The "k+1" rule, introduced by Spendley et al. to take
247noise in the cost function into account, is presented.
248
249Chapter 7, "Additional Concerns and Topics", deals with advanced questions
250regarding these algorithms. The variable size simplex algorithm is analyzed
251in the situation of a ridge. Partially oscillatory collapse of the
252Nelder-Mead algorithm is presented. The same behavior is
253presented in the case of a saddle point. This clearly shows that
254practitioners were aware of the convergence problem of this
255algorithm well before McKinnon presented a simple counter-example (in 1998).
256The "Massive Contraction" step of Nelder and Mead is presented as a
257solution for this oscillatory behavior. The authors present
258a method, due to Ernst, which preserves the volume of the
259simplex, instead of shrinking it. This method is based on a
260translation of the simplex. This modification requires
261$n+1$ function evaluations. A more efficient method, due to King,
262is based on reflection with respect to the next-to-worst
263vertex. This modification was first suggested by Spendley et al.
264in their fixed-size simplex algorithm.
265
266In the same chapter, the authors present the behavior of the algorithms
267in the case of multiple optima. They also present briefly other
268types of simplex algorithms.
269
270A complete bibliography (from 1962 to 1990) on simplex-based optimization is given at
271the end of the book.
272
148\section{Subrahmanyam, 1989} 273\section{Subrahmanyam, 1989}
149 274
150"An extension of the simplex method to constrained nonlinear optimization", 275"An extension of the simplex method to constrained nonlinear optimization",
@@ -235,7 +360,7 @@ accumulation point of the simplices is a critical point of f.
235 360
236The same ideas are presented in the book \cite{Kelley1999}. 361The same ideas are presented in the book \cite{Kelley1999}.
237 362
238\subsection{Han, 2000} 363\section{Han, 2000}
239 364
240In his Phd thesis \cite{Han2000}, Lixing Han analyzes the 365In his Phd thesis \cite{Han2000}, Lixing Han analyzes the
241properties of the Nelder-Mead algorithm. 366properties of the Nelder-Mead algorithm.
@@ -339,7 +464,7 @@ Johan Andersson, 2001
339This PhD thesis \cite{Andersson01multiobjectiveoptimization} gives a brief overview of the Complex method by Box in 464This PhD thesis \cite{Andersson01multiobjectiveoptimization} gives a brief overview of the Complex method by Box in
340section 5.1. 465section 5.1.
341 466
342\subsection{Peters, Bolte, Marschner, N\"{u}ssen and Laur, 2002} 467\section{Peters, Bolte, Marschner, N\"{u}ssen and Laur, 2002}
343 468
344In \cite{590075}, "Enhanced Optimization Algorithms for the Development of Microsystems", 469In \cite{590075}, "Enhanced Optimization Algorithms for the Development of Microsystems",
345the authors combine radial basis function interpolation methods 470the authors combine radial basis function interpolation methods
@@ -396,4 +521,3 @@ Singer and Nelder
396This article is a complete review of the Nelder-Mead algorithm. 521This article is a complete review of the Nelder-Mead algorithm.
397Restarting the algorithm is advised when a premature termination occurs.
398 523
399