package PDL::Demos::Simplex;
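# PDL demo module: info() returns the demo's name and description, init() and
# done() return snippets of code run before and after the demo, and demo()
# returns the list of comment/act steps below.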
sub init {'
use PDL::Opt::Simplex;
'}
sub done {'
undef $w;
'}
sub info {('simplex','Simplex optimisation (Req.: PDL::Graphics::Simple)')}
my @demos = (
[comment => q|
This demo illustrates the PDL::Opt::Simplex module.
You must have PDL::Graphics::Simple installed to run it.
The simplex algorithm finds the optimum "point" (coordinates) in a space
you define, which can have any number (called "n" here) of dimensions.
The algorithm takes either a fully-formed cloud of n+1 points, or a
single starting point, in which case it constructs the cloud for you
using the "initsize" parameter. It also takes a function that takes a
series of points in your space and returns the "value" at each of those
points. From that, it works out which point of the simplex to move so the
simplex creeps closer to the optimum point, the one where your function
has its lowest value.
It also takes other, less important parameters, which you'll see,
including a "logging" function which you can use to report progress,
or plot data.
|],
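[comment => q|
The calls below all follow the same pattern (a sketch based on the calls in
this demo; see the PDL::Opt::Simplex docs for the full story):

  ($optimum, $ssize, $optval) = simplex($init, $initsize, $minsize,
                                        $maxiter, \&func, \&logs);

$init is either a single starting point or a ready-made cloud of n+1 points,
\&func maps a set of points to their values, and \&logs (optional) is called
with the current simplex at each step - here it plots the simplex.
|],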
[act => q|
# Load the necessary modules, set up a plotting window.
use PDL::Graphics::Simple;
use PDL::Opt::Simplex;
$w = pgswin();
# Try a simple elliptical bowl, (4*x)**2 + y**2; the multiplier makes the
# algorithm prioritise the first (X) dimension, as you'll see on the plot.
$w->plot(with=>'lines', [0], [1], {xrange=>[-15,5],yrange=>[-15,5]});
my $mult = pdl 4,1;
sub func { (($mult * $_[0]) ** 2)->sumover }
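# The logging sub plots each simplex as a closed triangle: the first vertex
# is glued back onto the end so the polyline joins up.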
sub logs {
  $w->oplot(with=>'lines', $_[0]->glue(1,$_[0]->slice(",0"))->using(0,1));
}
simplex(pdl(-10,-10), 0.5, 0.01, 30,
  \&func,
  \&logs
);
|],
[act => q|
# Now the first of two examples contributed by Alison Offer.
# These values are used by both examples.
$minsize = 1.e-6; # convergence: stop once the simplex has shrunk to this size
$maxiter = 100;   # stop after this many iterations even if not yet converged
# Now we minimise polynomial: (x-3)^2 + 2*(x-3)*(y-2.5) + 3*(y-2.5)^2
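# This quadratic is positive definite, so its minimum value is 0, at (3, 2.5);
# watch the simplexes home in on that point from the start at (0, 1).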
$w->plot(with=>'lines', [0], [1], {xrange=>[-1,5],yrange=>[-1,4]}); # reset
$init = pdl [ 0 , 1 ];
$initsize = 2;
($optimum,$ssize,$optval) = simplex($init,$initsize,$minsize,$maxiter,
  sub {
    my ($x, $y) = $_[0]->using(0,1);
    ($x-3)**2 + 2*($x-3)*($y-2.5) + 3*($y-2.5)**2;
  },
  \&logs
);
|],
[act => q|
# Now fit a Gaussian to noisy data by minimising the least-squares error.
# The data are 32*exp(-((x-10)/6)^2) + noise.
$factor = 3; # noise factor
# data: Gaussian + noise (seed the random generator so the run is repeatable)
$j = sequence(20); srandom(5);
$data = 32*exp(-(($j-10)/6)**2) + $factor * (random(20) - 0.5);
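# The three fit parameters are the amplitude, centre and width of the
# Gaussian; the true values behind the data are 32, 10 and 6.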
$init = pdl [ 33, 9, 12 ];
$initsize = 2;
# The plot shows only the first two fit parameters; the third is ignored.
$w->plot(with=>'lines', [0], [1], {xrange=>[30,36],yrange=>[7,12]}); # reset
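# The objective below is the sum of squared residuals between the data and a
# Gaussian built from the candidate parameters.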
($optimum,$ssize,$optval) = simplex($init,$initsize,$minsize,$maxiter,
  sub {
    my ($x, $y, $z) = map $_[0]->slice($_), 0..2;
    (($data - $x*exp(-(($j-$y)/$z)**2))**2)->sumover;
  },
  \&logs
);
|],
[comment => q|
That concludes the demo of PDL::Opt::Simplex. There are other optimisation
modules for PDL, including PDL::Opt::GLPK, PDL::Opt::NonLinear,
PDL::Opt::QP, and PDL::Opt::ParticleSwarm.
|],
);
sub demo { @demos }
1;