diff --git a/.gitignore b/.gitignore index fa008b27..5b7b65e5 100755 --- a/.gitignore +++ b/.gitignore @@ -221,10 +221,6 @@ tags ### VisualStudioCode ### .vscode/* -!.vscode/settings.json -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json ### VisualStudioCode Patch ### # Ignore all local history of files diff --git a/doc/examples/algorithms/plot_algorithm_mestatic_IO.py b/doc/examples/algorithms/plot_algorithm_mestatic_IO.py index a3e4d4ec..af017a5b 100755 --- a/doc/examples/algorithms/plot_algorithm_mestatic_IO.py +++ b/doc/examples/algorithms/plot_algorithm_mestatic_IO.py @@ -110,7 +110,6 @@ def deq(rho, t, alpha, beta, gamma): n_grid=[100, 100], coords=grid.coords, results=results, - fn_out=None, camera_pos=[45., 65]) # On Windows subprocesses will import (i.e. execute) the main module at start. diff --git a/doc/examples/algorithms/plot_algorithm_mestaticprojection.py b/doc/examples/algorithms/plot_algorithm_mestaticprojection.py index 8853483c..6baf097e 100755 --- a/doc/examples/algorithms/plot_algorithm_mestaticprojection.py +++ b/doc/examples/algorithms/plot_algorithm_mestaticprojection.py @@ -31,15 +31,15 @@ # gPC options options = dict() options["method"] = "reg" -options["solver"] = "Moore-Penrose" +options["solver"] = "LarsLasso" options["settings"] = None -options["order"] = [3, 3] -options["order_max"] = 3 +options["order"] = [5, 5] +options["order_max"] = 5 options["interaction_order"] = 2 options["n_cpu"] = 0 -options["gradient_enhanced"] = True -options["gradient_calculation"] = "FD_fwd" -options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2} +# options["gradient_enhanced"] = False +# options["gradient_calculation"] = "FD_fwd" +# options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2} options["error_type"] = "nrmsd" options["n_samples_validation"] = 1e3 options["qoi"] = "all" @@ -52,7 +52,7 @@ options["save_session_format"] = save_session_format options["grid"] = pygpc.Random options["grid_options"] = {"seed": 1} -options["n_grid"] = 1000 +options["n_grid"] = 2000 options["adaptive_sampling"] = False # define algorithm diff --git a/doc/examples/algorithms/plot_algorithm_static_IO.py b/doc/examples/algorithms/plot_algorithm_static_IO.py index 42e88b9a..8b14e237 100755 --- a/doc/examples/algorithms/plot_algorithm_static_IO.py +++ b/doc/examples/algorithms/plot_algorithm_static_IO.py @@ -59,6 +59,12 @@ options["backend"] = "omp" options["verbose"] = True +# determine number of gPC coefficients (hint: compare it with the amount of output data you have) +n_coeffs = pygpc.get_num_coeffs_sparse(order_dim_max=options["order"], + order_glob_max=options["order_max"], + order_inter_max=options["interaction_order"], + dim=len(parameters)) + # define algorithm algorithm = pygpc.Static_IO(parameters=parameters, options=options, grid=grid, results=results) @@ -87,15 +93,19 @@ calc_pdf=True, algorithm="standard") +# get a summary of the sensitivity coefficients +sobol, gsens = pygpc.get_sens_summary(fn_results, parameters) +print(sobol) +print(gsens) + # plot gPC approximation and IO data pygpc.plot_gpc(session=session, coeffs=coeffs, random_vars=["x1", "x3"], - output_idx= 0, + output_idx=0, n_grid=[100, 100], coords=grid.coords, - results=results, - fn_out=None) + results=results) # On Windows subprocesses will import (i.e. execute) the main module at start. 
# You need to insert an if __name__ == '__main__': guard in the main module to avoid diff --git a/doc/examples/algorithms/tmp/mestaticprojection.hdf5 b/doc/examples/algorithms/tmp/mestaticprojection.hdf5 old mode 100755 new mode 100644 index e3241748..0b39fb24 Binary files a/doc/examples/algorithms/tmp/mestaticprojection.hdf5 and b/doc/examples/algorithms/tmp/mestaticprojection.hdf5 differ diff --git a/doc/examples/algorithms/tmp/mestaticprojection.pkl b/doc/examples/algorithms/tmp/mestaticprojection.pkl index 5a372ee5..64971da3 100755 Binary files a/doc/examples/algorithms/tmp/mestaticprojection.pkl and b/doc/examples/algorithms/tmp/mestaticprojection.pkl differ diff --git a/doc/examples/examples/plot_Lorenz.py b/doc/examples/examples/plot_Lorenz.py index 07e3933e..3829e07e 100755 --- a/doc/examples/examples/plot_Lorenz.py +++ b/doc/examples/examples/plot_Lorenz.py @@ -30,6 +30,8 @@ import pygpc import numpy as np from collections import OrderedDict +import matplotlib +matplotlib.use("Qt5Agg") #%% # At first, we are loading the model: @@ -93,23 +95,26 @@ output_idx=None, calc_sobol=True, calc_global_sens=True, - calc_pdf=False, - n_samples=int(1e4)) + calc_pdf=False) # extract sensitivity coefficients from results .hdf5 file sobol, gsens = pygpc.get_sens_summary(fn_gpc=fn_results, parameters_random=session.parameters_random, fn_out=fn_results + "_sens_summary.txt") -# plot time course of sensitivity coefficients and mean and standard deviation of x(t) +# plot time course of mean together with probability density, sobol sensitivity coefficients and global derivatives t = np.arange(0.0, parameters["t_end"], parameters["step_size"]) -pygpc.plot_sens_summary(sobol=sobol, +pygpc.plot_sens_summary(session=session, + coeffs=coeffs, + sobol=sobol, gsens=gsens, - multiple_qoi=True, + plot_pdf_over_output_idx=True, qois=t, - results=results, + mean=pygpc.SGPC.get_mean(coeffs), + std=pygpc.SGPC.get_std(coeffs), x_label="t in s", - y_label="x(t)") + y_label="x(t)", + zlim=[0, 0.4]) # # On Windows subprocesses will import (i.e. execute) the main module at start. diff --git a/doc/examples/examples/plot_Lorenz_julia.py b/doc/examples/examples/plot_Lorenz_julia.py index a6a19a53..3a9e7eb4 100755 --- a/doc/examples/examples/plot_Lorenz_julia.py +++ b/doc/examples/examples/plot_Lorenz_julia.py @@ -100,6 +100,8 @@ def simulate(self, process_id=None, matlab_engine=None): import pygpc import numpy as np from collections import OrderedDict + import matplotlib + matplotlib.use("Qt5Agg") # Windows users have to encapsulate the code into a main function to avoid multiprocessing errors. 
# def main(): @@ -162,14 +164,17 @@ def simulate(self, process_id=None, matlab_engine=None): # plot sobol indices over time and mean and standard deviation of x(t) t = np.arange(0.0, parameters["t_end"], parameters["step_size"]) - pygpc.plot_sens_summary(sobol=sobol, + pygpc.plot_sens_summary(session=session, + coeffs=coeffs, + sobol=sobol, gsens=gsens, - multiple_qoi=True, + plot_pdf_over_output_idx=True, qois=t, - results=results, + mean=pygpc.SGPC.get_mean(coeffs), + std=pygpc.SGPC.get_std(coeffs), x_label="t in s", - y_label="x(t)") - + y_label="x(t)", + zlim=[0, 0.4]) """ import matplotlib.pyplot as plt diff --git a/doc/examples/examples/plot_ishigami.py b/doc/examples/examples/plot_ishigami.py index 3df0f009..5bd1baf4 100755 --- a/doc/examples/examples/plot_ishigami.py +++ b/doc/examples/examples/plot_ishigami.py @@ -18,6 +18,8 @@ import pygpc import numpy as np from collections import OrderedDict +import matplotlib +matplotlib.use("Qt5Agg") fn_results = "tmp/example_ishigami" @@ -112,8 +114,8 @@ #%% # Sensitivity analysis # ^^^^^^^^^^^^^^^^^^^^ -sobol, gsens = pygpc.get_sens_summary(fn_results, parameters_random) -pygpc.plot_sens_summary(sobol, gsens) +sobol, gsens = pygpc.get_sens_summary(fn_results, parameters_random, fn_results + "_sens_summary.txt") +pygpc.plot_sens_summary(sobol=sobol, gsens=gsens) # # On Windows subprocesses will import (i.e. execute) the main module at start. diff --git a/doc/examples/examples/tmp/example_ishigami.hdf5 b/doc/examples/examples/tmp/example_ishigami.hdf5 deleted file mode 100644 index 44dbc2a0..00000000 Binary files a/doc/examples/examples/tmp/example_ishigami.hdf5 and /dev/null differ diff --git a/doc/examples/examples/tmp/example_ishigami.pkl b/doc/examples/examples/tmp/example_ishigami.pkl index 03448cc0..56eb7071 100755 Binary files a/doc/examples/examples/tmp/example_ishigami.pkl and b/doc/examples/examples/tmp/example_ishigami.pkl differ diff --git a/doc/examples/examples/tmp/example_ishigami_mc.hdf5 b/doc/examples/examples/tmp/example_ishigami_mc.hdf5 deleted file mode 100644 index 31d8ebfd..00000000 Binary files a/doc/examples/examples/tmp/example_ishigami_mc.hdf5 and /dev/null differ diff --git a/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.pdf b/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.pdf index c72ba37b..3d3bb8bb 100755 Binary files a/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.pdf and b/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.pdf differ diff --git a/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.png b/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.png index 7c99a40d..89a17e3d 100755 Binary files a/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.png and b/doc/examples/examples/tmp/example_ishigami_mc_qoi_0.png differ diff --git a/doc/examples/examples/tmp/example_ishigami_sens_summary.txt b/doc/examples/examples/tmp/example_ishigami_sens_summary.txt new file mode 100644 index 00000000..81ed3a24 --- /dev/null +++ b/doc/examples/examples/tmp/example_ishigami_sens_summary.txt @@ -0,0 +1,14 @@ +Normalized Sobol indices: +========================== +['x2']: 0.442411 +['x1']: 0.313905 +['x1', 'x3']: 0.243684 +['x1', 'x2']: 0.000000 +['x2', 'x3']: 0.000000 +['x3']: 0.000000 + +Average derivatives: +========================== +['x1']: 8.47e-06 +['x2']: -3.25e-07 +['x3']: -1.51e-06 diff --git a/doc/examples/examples/tmp/example_ishigami_val.hdf5 b/doc/examples/examples/tmp/example_ishigami_val.hdf5 deleted file mode 100644 index 5bff5574..00000000 Binary files 
a/doc/examples/examples/tmp/example_ishigami_val.hdf5 and /dev/null differ diff --git a/doc/examples/examples/tmp/example_ishigami_val_qoi_0.pdf b/doc/examples/examples/tmp/example_ishigami_val_qoi_0.pdf index ed108895..e053c93d 100755 Binary files a/doc/examples/examples/tmp/example_ishigami_val_qoi_0.pdf and b/doc/examples/examples/tmp/example_ishigami_val_qoi_0.pdf differ diff --git a/doc/examples/examples/tmp/example_ishigami_val_qoi_0.png b/doc/examples/examples/tmp/example_ishigami_val_qoi_0.png index 299fec20..eb1fca18 100755 Binary files a/doc/examples/examples/tmp/example_ishigami_val_qoi_0.png and b/doc/examples/examples/tmp/example_ishigami_val_qoi_0.png differ diff --git a/doc/examples/examples/tmp/example_lorenz.hdf5 b/doc/examples/examples/tmp/example_lorenz.hdf5 deleted file mode 100644 index a9e8eecd..00000000 Binary files a/doc/examples/examples/tmp/example_lorenz.hdf5 and /dev/null differ diff --git a/doc/examples/examples/tmp/example_lorenz_sens_summary.txt b/doc/examples/examples/tmp/example_lorenz_sens_summary.txt index 24a80755..3cb39771 100755 --- a/doc/examples/examples/tmp/example_lorenz_sens_summary.txt +++ b/doc/examples/examples/tmp/example_lorenz_sens_summary.txt @@ -1,14 +1,14 @@ Normalized Sobol indices: =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -['rho']: 0.240235 0.982133 0.979162 0.981394 0.984593 0.987353 0.989438 0.990944 0.992012 0.992763 0.993282 0.993627 0.993836 0.993931 0.993924 0.993817 0.993606 0.993279 0.992817 0.992197 0.991387 0.990352 0.989052 0.987448 0.985500 0.983176 0.980454 0.977323 0.973788 0.969865 0.965588 0.960995 0.956134 0.951052 0.945797 0.940410 0.934926 0.929377 0.923784 0.918166 0.912537 0.906905 0.901282 0.895673 0.890087 0.884530 0.879012 0.873542 0.868128 0.862782 0.857513 0.852331 0.847249 0.842276 0.837421 0.832693 0.828101 0.823652 0.819351 0.815203 0.811212 0.807381 0.803710 0.800200 0.796851 0.793661 0.790628 0.787749 0.785021 0.782439 0.779999 0.777697 0.775527 0.773484 0.771564 0.769760 0.768067 0.766480 0.764994 0.763604 0.762304 0.761089 0.759955 0.758896 0.757909 0.756990 0.756133 0.755336 0.754594 0.753903 0.753262 0.752665 0.752111 0.751596 0.751118 0.750674 0.750263 0.749881 0.749526 0.749197 -['beta']: 0.209073 0.008158 0.011212 0.009921 0.007827 0.006058 0.004775 0.003890 0.003294 0.002898 0.002646 0.002499 0.002436 0.002444 0.002518 0.002658 0.002868 
0.003155 0.003533 0.004015 0.004621 0.005373 0.006295 0.007415 0.008761 0.010358 0.012234 0.014408 0.016897 0.019708 0.022845 0.026299 0.030059 0.034105 0.038415 0.042963 0.047722 0.052667 0.057771 0.063012 0.068368 0.073819 0.079348 0.084936 0.090569 0.096228 0.101899 0.107566 0.113213 0.118823 0.124381 0.129872 0.135280 0.140593 0.145796 0.150878 0.155828 0.160636 0.165294 0.169796 0.174135 0.178309 0.182314 0.186149 0.189813 0.193308 0.196634 0.199795 0.202793 0.205632 0.208317 0.210851 0.213241 0.215492 0.217609 0.219597 0.221463 0.223213 0.224851 0.226384 0.227818 0.229157 0.230407 0.231573 0.232660 0.233673 0.234616 0.235493 0.236310 0.237069 0.237775 0.238430 0.239039 0.239604 0.240129 0.240616 0.241068 0.241487 0.241875 0.242236 -['beta', 'rho']: 0.178454 0.000000 0.000001 0.000004 0.000008 0.000015 0.000024 0.000035 0.000049 0.000068 0.000092 0.000123 0.000164 0.000219 0.000291 0.000387 0.000513 0.000679 0.000895 0.001175 0.001532 0.001980 0.002534 0.003204 0.003998 0.004917 0.005951 0.007087 0.008298 0.009556 0.010827 0.012074 0.013268 0.014379 0.015388 0.016279 0.017047 0.017689 0.018209 0.018613 0.018910 0.019109 0.019221 0.019255 0.019222 0.019129 0.018984 0.018796 0.018571 0.018314 0.018031 0.017727 0.017406 0.017072 0.016729 0.016379 0.016025 0.015670 0.015316 0.014965 0.014619 0.014279 0.013947 0.013623 0.013309 0.013006 0.012714 0.012434 0.012166 0.011910 0.011666 0.011435 0.011216 0.011009 0.010814 0.010630 0.010457 0.010296 0.010144 0.010002 0.009870 0.009746 0.009631 0.009524 0.009424 0.009332 0.009246 0.009166 0.009092 0.009024 0.008960 0.008902 0.008847 0.008797 0.008750 0.008707 0.008668 0.008631 0.008597 0.008566 -['sigma', 'rho']: 0.140103 0.002380 0.001794 0.001427 0.001195 0.001048 0.000955 0.000896 0.000861 0.000840 0.000828 0.000823 0.000819 0.000816 0.000811 0.000802 0.000788 0.000767 0.000738 0.000700 0.000653 0.000598 0.000534 0.000466 0.000395 0.000325 0.000259 0.000199 0.000149 0.000110 0.000081 0.000061 0.000050 0.000044 0.000042 0.000043 0.000045 0.000046 0.000047 0.000047 0.000047 0.000046 0.000044 0.000042 0.000041 0.000039 0.000037 0.000036 0.000035 0.000033 0.000032 0.000031 0.000030 0.000029 0.000028 0.000027 0.000026 0.000025 0.000024 0.000023 0.000022 0.000021 0.000019 0.000018 0.000017 0.000016 0.000015 0.000014 0.000013 0.000012 0.000011 0.000010 0.000009 0.000008 0.000007 0.000007 0.000006 0.000005 0.000005 0.000004 0.000004 0.000003 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000000 0.000000 0.000000 -['sigma']: 0.117694 0.007308 0.007810 0.007240 0.006369 0.005522 0.004806 0.004232 0.003782 0.003430 0.003151 0.002927 0.002743 0.002588 0.002454 0.002334 0.002223 0.002117 0.002013 0.001909 0.001802 0.001691 0.001577 0.001458 0.001336 0.001212 0.001088 0.000967 0.000850 0.000740 0.000639 0.000548 0.000467 0.000397 0.000336 0.000285 0.000243 0.000207 0.000177 0.000152 0.000132 0.000115 0.000100 0.000088 0.000078 0.000069 0.000061 0.000054 0.000048 0.000043 0.000038 0.000034 0.000030 0.000027 0.000024 0.000021 0.000018 0.000016 0.000014 0.000012 0.000011 0.000009 0.000008 0.000007 0.000006 0.000005 0.000005 0.000004 0.000003 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 -['sigma', 'beta']: 0.114440 0.000021 0.000021 0.000013 
0.000008 0.000005 0.000003 0.000002 0.000002 0.000002 0.000001 0.000001 0.000002 0.000002 0.000002 0.000002 0.000003 0.000003 0.000004 0.000004 0.000005 0.000006 0.000007 0.000009 0.000010 0.000012 0.000014 0.000016 0.000018 0.000020 0.000021 0.000022 0.000023 0.000022 0.000021 0.000019 0.000017 0.000015 0.000012 0.000010 0.000008 0.000006 0.000006 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000004 0.000003 0.000003 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000002 0.000002 0.000002 0.000003 0.000003 0.000004 0.000004 0.000004 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000005 0.000004 0.000004 0.000004 0.000004 0.000004 0.000003 0.000003 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 +['beta']: 0.339810 0.008158 0.011210 0.009918 0.007823 0.006053 0.004770 0.003886 0.003289 0.002893 0.002640 0.002493 0.002430 0.002437 0.002510 0.002649 0.002857 0.003144 0.003519 0.004000 0.004603 0.005353 0.006273 0.007391 0.008734 0.010329 0.012203 0.014374 0.016858 0.019663 0.022790 0.026232 0.029976 0.034003 0.038291 0.042816 0.047553 0.052476 0.057561 0.062785 0.068126 0.073565 0.079082 0.084658 0.090277 0.095923 0.101578 0.107227 0.112855 0.118447 0.123989 0.129467 0.134867 0.140177 0.145385 0.150479 0.155449 0.160287 0.164983 0.169530 0.173922 0.178154 0.182223 0.186124 0.189857 0.193422 0.196817 0.200046 0.203109 0.206010 0.208753 0.211341 0.213779 0.216073 0.218228 0.220249 0.222142 0.223914 0.225569 0.227115 0.228557 0.229900 0.231150 0.232313 0.233395 0.234399 0.235331 0.236196 0.236998 0.237742 0.238431 0.239068 0.239659 0.240205 0.240710 0.241177 0.241610 0.242009 0.242378 0.242719 +['sigma', 'rho']: 0.244505 0.002379 0.001792 0.001425 0.001193 0.001045 0.000952 0.000893 0.000856 0.000835 0.000822 0.000815 0.000811 0.000806 0.000799 0.000788 0.000772 0.000749 0.000717 0.000677 0.000628 0.000571 0.000508 0.000440 0.000370 0.000303 0.000241 0.000188 0.000143 0.000110 0.000085 0.000070 0.000060 0.000055 0.000052 0.000050 0.000049 0.000048 0.000046 0.000045 0.000044 0.000042 0.000041 0.000040 0.000040 0.000039 0.000039 0.000039 0.000039 0.000038 0.000038 0.000037 0.000036 0.000035 0.000034 0.000032 0.000031 0.000029 0.000028 0.000026 0.000024 0.000023 0.000021 0.000019 0.000018 0.000016 0.000015 0.000013 0.000012 0.000011 0.000010 0.000009 0.000008 0.000007 0.000006 0.000005 0.000005 0.000004 0.000004 0.000003 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000000 0.000000 0.000000 0.000000 0.000000 +['beta', 'rho']: 0.149188 0.000000 0.000001 0.000004 0.000009 0.000015 0.000024 0.000035 0.000050 0.000069 0.000093 0.000124 0.000166 0.000221 0.000294 0.000391 0.000518 0.000686 0.000905 0.001187 0.001547 0.002000 0.002559 0.003237 0.004039 0.004966 0.006009 0.007152 0.008369 0.009629 0.010896 0.012136 0.013317 0.014413 0.015404 0.016277 0.017028 0.017654 0.018162 0.018558 0.018851 0.019053 0.019171 0.019218 0.019201 0.019128 0.019007 0.018843 0.018642 0.018408 0.018145 0.017857 0.017548 0.017221 0.016880 0.016527 0.016167 0.015801 0.015433 0.015066 0.014701 0.014342 0.013989 0.013646 0.013313 0.012991 0.012681 0.012385 0.012102 0.011834 0.011579 0.011338 0.011111 0.010897 0.010697 0.010510 0.010334 0.010171 0.010019 0.009877 0.009746 0.009624 0.009511 0.009406 0.009309 0.009219 0.009136 0.009060 0.008989 0.008924 0.008864 0.008809 0.008758 
0.008711 0.008668 0.008628 0.008591 0.008558 0.008527 0.008498 +['sigma', 'beta']: 0.119990 0.000021 0.000021 0.000014 0.000008 0.000005 0.000003 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000003 0.000003 0.000004 0.000005 0.000006 0.000007 0.000009 0.000010 0.000012 0.000014 0.000016 0.000018 0.000020 0.000022 0.000023 0.000024 0.000025 0.000025 0.000024 0.000023 0.000021 0.000019 0.000017 0.000015 0.000013 0.000011 0.000010 0.000008 0.000008 0.000007 0.000007 0.000007 0.000007 0.000007 0.000006 0.000006 0.000005 0.000005 0.000004 0.000004 0.000004 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 +['rho']: 0.087601 0.982134 0.979167 0.981401 0.984602 0.987363 0.989450 0.990956 0.992026 0.992778 0.993299 0.993646 0.993857 0.993955 0.993951 0.993848 0.993640 0.993317 0.992858 0.992241 0.991432 0.990396 0.989093 0.987483 0.985526 0.983190 0.980455 0.977311 0.973766 0.969840 0.965568 0.960990 0.956153 0.951105 0.945889 0.940544 0.935103 0.929592 0.924033 0.918443 0.912833 0.907214 0.901596 0.895988 0.890399 0.884836 0.879312 0.873834 0.868414 0.863062 0.857789 0.852605 0.847519 0.842540 0.837678 0.832941 0.828335 0.823867 0.819543 0.815366 0.811341 0.807472 0.803758 0.800203 0.796805 0.793565 0.790480 0.787550 0.784771 0.782141 0.779654 0.777309 0.775098 0.773019 0.771066 0.769233 0.767516 0.765908 0.764406 0.763002 0.761693 0.760472 0.759335 0.758277 0.757293 0.756379 0.755530 0.754741 0.754010 0.753332 0.752704 0.752121 0.751582 0.751083 0.750621 0.750194 0.749798 0.749433 0.749095 0.748782 +['sigma']: 0.058906 0.007308 0.007808 0.007238 0.006365 0.005518 0.004801 0.004227 0.003777 0.003424 0.003144 0.002919 0.002734 0.002578 0.002443 0.002321 0.002208 0.002101 0.001996 0.001890 0.001782 0.001671 0.001556 0.001438 0.001317 0.001195 0.001074 0.000955 0.000841 0.000735 0.000636 0.000548 0.000469 0.000401 0.000342 0.000292 0.000249 0.000213 0.000182 0.000157 0.000135 0.000117 0.000101 0.000087 0.000076 0.000066 0.000057 0.000050 0.000044 0.000038 0.000033 0.000029 0.000025 0.000022 0.000019 0.000017 0.000015 0.000013 0.000012 0.000010 0.000009 0.000008 0.000007 0.000006 0.000005 0.000005 0.000004 0.000004 0.000003 0.000003 0.000002 0.000002 0.000002 0.000002 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000001 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 Average derivatives: 
=================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== -['beta']: 1.37e-15 9.62e-04 3.93e-03 7.39e-03 1.06e-02 1.35e-02 1.61e-02 1.88e-02 2.16e-02 2.48e-02 2.84e-02 3.27e-02 3.78e-02 4.38e-02 5.10e-02 5.96e-02 7.00e-02 8.23e-02 9.69e-02 1.14e-01 1.35e-01 1.58e-01 1.85e-01 2.16e-01 2.50e-01 2.88e-01 3.29e-01 3.73e-01 4.19e-01 4.66e-01 5.13e-01 5.60e-01 6.06e-01 6.50e-01 6.92e-01 7.30e-01 7.67e-01 8.00e-01 8.30e-01 8.58e-01 8.83e-01 9.06e-01 9.27e-01 9.46e-01 9.64e-01 9.81e-01 9.97e-01 1.01e+00 1.03e+00 1.04e+00 1.05e+00 1.06e+00 1.07e+00 1.08e+00 1.10e+00 1.10e+00 1.11e+00 1.12e+00 1.13e+00 1.14e+00 1.14e+00 1.15e+00 1.16e+00 1.16e+00 1.17e+00 1.17e+00 1.18e+00 1.18e+00 1.18e+00 1.19e+00 1.19e+00 1.19e+00 1.19e+00 1.20e+00 1.20e+00 1.20e+00 1.20e+00 1.20e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 -['sigma']: -9.62e-16 9.08e-04 3.25e-03 6.24e-03 9.40e-03 1.26e-02 1.58e-02 1.91e-02 2.26e-02 2.63e-02 3.03e-02 3.46e-02 3.93e-02 4.43e-02 4.97e-02 5.53e-02 6.12e-02 6.72e-02 7.33e-02 7.93e-02 8.50e-02 9.03e-02 9.49e-02 9.86e-02 1.01e-01 1.03e-01 1.03e-01 1.01e-01 9.84e-02 9.44e-02 8.94e-02 8.37e-02 7.75e-02 7.11e-02 6.48e-02 5.88e-02 5.32e-02 4.82e-02 4.38e-02 3.99e-02 3.66e-02 3.38e-02 3.15e-02 2.95e-02 2.78e-02 2.63e-02 2.49e-02 2.36e-02 2.24e-02 2.12e-02 2.00e-02 1.88e-02 1.76e-02 1.64e-02 1.52e-02 1.40e-02 1.28e-02 1.17e-02 1.06e-02 9.55e-03 8.56e-03 7.62e-03 6.75e-03 5.93e-03 5.18e-03 4.50e-03 3.87e-03 3.31e-03 2.79e-03 2.34e-03 1.93e-03 1.57e-03 1.25e-03 9.64e-04 7.18e-04 5.04e-04 3.20e-04 1.61e-04 2.56e-05 -8.90e-05 -1.85e-04 -2.65e-04 -3.30e-04 -3.83e-04 -4.25e-04 -4.58e-04 -4.82e-04 -4.99e-04 -5.10e-04 -5.16e-04 -5.17e-04 -5.15e-04 -5.10e-04 -5.02e-04 -4.92e-04 -4.81e-04 -4.68e-04 -4.54e-04 -4.40e-04 -4.25e-04 -['rho']: -1.87e-15 1.05e-02 3.64e-02 7.25e-02 1.17e-01 1.68e-01 2.26e-01 2.92e-01 3.65e-01 4.46e-01 5.36e-01 6.35e-01 7.44e-01 8.63e-01 9.91e-01 1.13e+00 1.28e+00 1.43e+00 1.60e+00 1.76e+00 1.93e+00 2.11e+00 2.27e+00 2.43e+00 2.58e+00 2.72e+00 2.84e+00 2.94e+00 3.02e+00 3.09e+00 3.13e+00 3.16e+00 3.18e+00 3.19e+00 3.18e+00 3.17e+00 3.15e+00 3.13e+00 3.11e+00 3.08e+00 3.05e+00 3.02e+00 2.99e+00 2.97e+00 2.94e+00 2.91e+00 2.89e+00 2.86e+00 2.84e+00 2.82e+00 2.79e+00 2.77e+00 2.75e+00 
2.73e+00 2.71e+00 2.69e+00 2.67e+00 2.65e+00 2.63e+00 2.62e+00 2.60e+00 2.58e+00 2.56e+00 2.54e+00 2.53e+00 2.51e+00 2.50e+00 2.48e+00 2.46e+00 2.45e+00 2.43e+00 2.42e+00 2.41e+00 2.39e+00 2.38e+00 2.37e+00 2.36e+00 2.35e+00 2.33e+00 2.32e+00 2.31e+00 2.30e+00 2.30e+00 2.29e+00 2.28e+00 2.27e+00 2.26e+00 2.26e+00 2.25e+00 2.24e+00 2.24e+00 2.23e+00 2.23e+00 2.22e+00 2.22e+00 2.21e+00 2.21e+00 2.21e+00 2.20e+00 2.20e+00 +['beta']: 1.48e-15 9.62e-04 3.93e-03 7.40e-03 1.06e-02 1.35e-02 1.61e-02 1.88e-02 2.16e-02 2.48e-02 2.85e-02 3.28e-02 3.79e-02 4.40e-02 5.12e-02 6.00e-02 7.04e-02 8.29e-02 9.77e-02 1.15e-01 1.36e-01 1.60e-01 1.87e-01 2.18e-01 2.53e-01 2.91e-01 3.32e-01 3.76e-01 4.21e-01 4.68e-01 5.14e-01 5.61e-01 6.06e-01 6.50e-01 6.91e-01 7.30e-01 7.67e-01 8.01e-01 8.32e-01 8.61e-01 8.87e-01 9.11e-01 9.32e-01 9.52e-01 9.71e-01 9.87e-01 1.00e+00 1.02e+00 1.03e+00 1.04e+00 1.05e+00 1.07e+00 1.08e+00 1.09e+00 1.09e+00 1.10e+00 1.11e+00 1.12e+00 1.13e+00 1.13e+00 1.14e+00 1.14e+00 1.15e+00 1.16e+00 1.16e+00 1.17e+00 1.17e+00 1.17e+00 1.18e+00 1.18e+00 1.18e+00 1.19e+00 1.19e+00 1.19e+00 1.20e+00 1.20e+00 1.20e+00 1.20e+00 1.20e+00 1.20e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.21e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 1.22e+00 +['rho']: 1.10e-15 1.05e-02 3.64e-02 7.25e-02 1.17e-01 1.68e-01 2.26e-01 2.92e-01 3.65e-01 4.46e-01 5.36e-01 6.35e-01 7.44e-01 8.63e-01 9.91e-01 1.13e+00 1.28e+00 1.43e+00 1.60e+00 1.76e+00 1.94e+00 2.11e+00 2.27e+00 2.43e+00 2.58e+00 2.72e+00 2.84e+00 2.94e+00 3.02e+00 3.08e+00 3.13e+00 3.16e+00 3.18e+00 3.18e+00 3.18e+00 3.17e+00 3.15e+00 3.13e+00 3.11e+00 3.08e+00 3.06e+00 3.03e+00 3.01e+00 2.98e+00 2.95e+00 2.93e+00 2.90e+00 2.88e+00 2.86e+00 2.83e+00 2.81e+00 2.79e+00 2.77e+00 2.74e+00 2.72e+00 2.70e+00 2.68e+00 2.66e+00 2.64e+00 2.62e+00 2.60e+00 2.58e+00 2.56e+00 2.54e+00 2.53e+00 2.51e+00 2.49e+00 2.47e+00 2.46e+00 2.44e+00 2.43e+00 2.41e+00 2.40e+00 2.38e+00 2.37e+00 2.36e+00 2.35e+00 2.34e+00 2.32e+00 2.31e+00 2.30e+00 2.30e+00 2.29e+00 2.28e+00 2.27e+00 2.26e+00 2.25e+00 2.25e+00 2.24e+00 2.24e+00 2.23e+00 2.22e+00 2.22e+00 2.22e+00 2.21e+00 2.21e+00 2.20e+00 2.20e+00 2.20e+00 2.19e+00 +['sigma']: -1.16e-15 9.08e-04 3.25e-03 6.23e-03 9.38e-03 1.26e-02 1.58e-02 1.91e-02 2.25e-02 2.62e-02 3.02e-02 3.45e-02 3.91e-02 4.40e-02 4.92e-02 5.47e-02 6.04e-02 6.62e-02 7.20e-02 7.76e-02 8.29e-02 8.77e-02 9.18e-02 9.50e-02 9.71e-02 9.80e-02 9.77e-02 9.62e-02 9.36e-02 9.01e-02 8.58e-02 8.09e-02 7.58e-02 7.05e-02 6.52e-02 6.01e-02 5.52e-02 5.05e-02 4.61e-02 4.21e-02 3.83e-02 3.49e-02 3.18e-02 2.89e-02 2.64e-02 2.40e-02 2.20e-02 2.01e-02 1.84e-02 1.69e-02 1.55e-02 1.43e-02 1.32e-02 1.22e-02 1.13e-02 1.04e-02 9.66e-03 8.98e-03 8.35e-03 7.79e-03 7.28e-03 6.82e-03 6.40e-03 6.02e-03 5.68e-03 5.37e-03 5.09e-03 4.83e-03 4.59e-03 4.38e-03 4.18e-03 3.99e-03 3.82e-03 3.66e-03 3.51e-03 3.36e-03 3.23e-03 3.10e-03 2.98e-03 2.86e-03 2.74e-03 2.63e-03 2.53e-03 2.42e-03 2.33e-03 2.23e-03 2.14e-03 2.05e-03 1.96e-03 1.88e-03 1.80e-03 1.72e-03 1.65e-03 1.57e-03 1.50e-03 1.44e-03 1.37e-03 1.31e-03 1.25e-03 1.20e-03 diff --git a/doc/examples/gpc/plot_multi_element_gpc.py b/doc/examples/gpc/plot_multi_element_gpc.py index e39a396a..2614ab3a 100755 --- a/doc/examples/gpc/plot_multi_element_gpc.py +++ b/doc/examples/gpc/plot_multi_element_gpc.py @@ -35,6 +35,8 @@ class is assigned to each sample (unsupervised learning) depending on its functi # def main(): import pygpc from collections import OrderedDict 
+import matplotlib +matplotlib.use("Qt5Agg") fn_results = 'tmp/mestatic' # filename of output save_session_format = ".pkl" # file format of saved gpc session ".hdf5" (slow) or ".pkl" (fast) diff --git a/pygpc/Algorithm.py b/pygpc/Algorithm.py index 3b6426a6..944c0bfe 100755 --- a/pygpc/Algorithm.py +++ b/pygpc/Algorithm.py @@ -257,6 +257,127 @@ def check_basic_options(self): if "grid_extension_method" not in self.options.keys(): self.options["grid_extension_method"] = "GPR" + def check_results(self, results, grid, gradient_results=None, gradient_results_idx=None, com=None, resample=True): + """ + Check the validity of the results and resample if required. + Updates the gPC object, the containing grid, and the results array. + + Parameters + ---------- + results : np.ndarray of float [n_samples x n_qoi] + Model output at sampling points. + grid : Grid object instance + Grid object instance the results are computed for. + gradient_results : ndarray of float [n_grid x n_out x dim], optional, default: None + Gradient of model function in grid points. + gradient_results_idx : ndarray of int [n_grid], optional, default: None + Indices of grid points where the gradient was evaluated. + com : Computation class instance, optional, default: None + Computation class instance to run the model if resample is True. + resample : bool, optional, default: True + Resample grid points and rerun model (requires Computation class instance to run model). + If False, the grid points and results are just deleted. + + Returns + ------- + results : np.ndarray of float [n_samples x n_qoi] + Updated (fixed) model output at sampling points. + gpc : SGPC or MEGPC object instance or list of SGPC or MEGPC object instances + GPC object(s) containing the basis functions and the updated grid. + gradient_results : ndarray of float [n_grid x n_out x dim] + Updated (fixed) gradients of model function in grid points not containing the points where + the gradients were NaN. + gradient_results_idx : ndarray of int [n_grid], optional, default: None + Updated (fixed) indices of grid points where the gradient was evaluated not containing the points where + the gradients were NaN. + grid : Grid object instance + Updated (fixed) grid object instance the results are computed for not containing the grid points where + the results were NaN. + """ + # get the indices of the sampling points where any of the QOIs were NaN + idx_nan = np.unique(np.where(np.isnan(results))[0]) + idx_nan_gradient = np.array([]) + + if gradient_results is not None: + idx_nan_gradient_local = np.unique(np.where(np.isnan(gradient_results))[0]) + + if len(idx_nan_gradient_local) > 0: + idx_nan_gradient = gradient_results_idx[idx_nan_gradient_local] + + idx_nan = np.unique(np.hstack((idx_nan, idx_nan_gradient)).astype(int)) + + if resample: + while len(idx_nan) > 0: + if self.options["verbose"]: + print(f"WARNING! Detected {len(idx_nan)} grid points with NaN results.
Resampling ...") + + # resample grid points + grid.resample(idx=idx_nan) + + # determine results at resampled grid points + results_resample = com.run(model=self.problem.model, + problem=self.problem, + coords=grid.coords[idx_nan, :], + coords_norm=grid.coords_norm[idx_nan, :], + i_iter=None, + i_subiter=None, + fn_results=None, + print_func_time=self.options["print_func_time"], + verbose=self.options["verbose"]) + + # Determine gradient [n_grid x n_out x dim] + if gradient_results is not None: + if self.options["gradient_calculation"] == "FD_fwd": + # for forward gradient calculation only pass the resampled grid points + grid_gradient = copy.deepcopy(grid) + idx_not_nan = np.array([i for i in range(grid.n_grid) if i not in idx_nan]) + grid_gradient.delete(idx=idx_not_nan) + + else: + # for gradient calculation from adjacent grid points pass the complete grid + grid_gradient = grid + + gradient_results_resample, gradient_results_idx_resample = get_gradient( + model=self.problem.model, + problem=self.problem, + grid=grid_gradient, + results=results_resample, + com=com, + method=self.options["gradient_calculation"], + gradient_results_present=None, + gradient_idx_skip=None, + i_iter=None, + i_subiter=None, + print_func_time=self.options["print_func_time"], + dx=self.options["gradient_calculation_options"]["dx"], + distance_weight=self.options["gradient_calculation_options"]["distance_weight"], + verbose=self.options["verbose"]) + + if self.options["gradient_calculation"] == "FD_fwd": + gradient_results[idx_nan, :, :] = gradient_results_resample + gradient_results_idx[idx_nan] = idx_nan[gradient_results_idx_resample] + else: + gradient_results = gradient_results_resample + gradient_results_idx = gradient_results_idx_resample + + # replace NaN results with new results at resampled grid points + results[idx_nan, :] = results_resample + + # get the indices of the sampling points where any of the QOIs where NaN + idx_nan = np.unique(np.where(np.isnan(results))[0]) + else: + # remove grid points with NaN results + grid.delete(idx=idx_nan) + + # remove NaN results + results = np.delete(results, idx_nan, axis=0) + + if gradient_results is not None: + gradient_results = np.delete(gradient_results, idx_nan_gradient_local, axis=0) + gradient_results_idx = np.delete(gradient_results_idx, idx_nan_gradient_local, axis=0) + + return results, gradient_results, gradient_results_idx, grid + class Static_IO(Algorithm): """ @@ -324,12 +445,12 @@ def __init__(self, parameters, options, results, grid, validation=None): if "interaction_order" not in self.options.keys(): self.options["interaction_order"] = self.problem.dim - if "error_type" not in self.options.keys(): - self.options["error_type"] = "loocv" + #if "error_type" not in self.options.keys(): + # self.options["error_type"] = "loocv" - if self.options["error_type"] != "loocv": - self.options["error_type"] = "loocv" - warnings.warn("Changing error calculation type to loocv ...") + #if self.options["error_type"] != "loocv": + # self.options["error_type"] = "loocv" + #warnings.warn("Changing error calculation type to loocv ...") def run(self): """ @@ -606,7 +727,7 @@ def run(self): eps_pre = eps + 1 i_grid = 0 - res_list = [] + res = np.array([]) # determine gpc approximation and determine error (increase grid size in case of adaptive sampling) while eps > self.options["eps"]: @@ -616,16 +737,21 @@ def run(self): start_time = time.time() - res_list.append(com.run(model=self.problem.model, - problem=self.problem, - 
coords=gpc.grid.coords[i_grid:gpc.grid.n_grid, :], - coords_norm=gpc.grid.coords_norm[i_grid:gpc.grid.n_grid, :], - i_iter=gpc.order_max, - i_subiter=gpc.interaction_order, - fn_results=None, - print_func_time=self.options["print_func_time"], - verbose=self.options["verbose"])) - res = np.vstack(res_list) + res_new = com.run(model=self.problem.model, + problem=self.problem, + coords=gpc.grid.coords[i_grid:gpc.grid.n_grid, :], + coords_norm=gpc.grid.coords_norm[i_grid:gpc.grid.n_grid, :], + i_iter=gpc.order_max, + i_subiter=gpc.interaction_order, + fn_results=None, + print_func_time=self.options["print_func_time"], + verbose=self.options["verbose"]) + + if len(res) > 0: + res = np.vstack((res, res_new)) + else: + res = res_new + i_grid = gpc.grid.n_grid iprint('Total parallel function evaluation: ' + str(time.time() - start_time) + ' sec', @@ -653,6 +779,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res, grad_res_3D, gradient_idx, gpc.grid = self.check_results(results=res, + gradient_results=grad_res_3D, + gradient_results_idx=gradient_idx, + grid=gpc.grid, + com=com) + # Initialize gpc matrix gpc.init_gpc_matrix(gradient_idx=gradient_idx) @@ -685,7 +818,7 @@ def run(self): gpc.grid.n_grid, n_grid_new, n_grid_new - gpc.grid.n_grid, self.options["grid_extension_method"]), tab=0, verbose=self.options["verbose"]) if self.options["grid_extension_method"] == "GPR": - gpc.grid.extend_random_grid(n_grid_new=n_grid_new, results=res) + gpc.grid.extend_random_grid(n_grid_new=n_grid_new, results=res, type="GP") else: gpc.grid.extend_random_grid(n_grid_new=n_grid_new) @@ -873,7 +1006,7 @@ def run(self): raise ValueError("If grid is not provided during initialization please provide options['n_grid']") elif self.options["grid"] == Random or self.options["grid"] == LHS or self.options["grid"] == GP: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={n_grid}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(n_grid)}") grid = self.options["grid"](parameters_random=self.problem.parameters_random, n_grid=n_grid, options=self.options["grid_options"]) @@ -918,6 +1051,8 @@ def run(self): options=self.options, validation=self.validation) + res_all = np.array([]) + # determine gpc approximation and determine error (increase grid size in case of adaptive sampling) while eps > self.options["eps"]: if i_grid < grid.n_grid: @@ -927,17 +1062,20 @@ def run(self): start_time = time.time() - res_all_list.append(com.run(model=self.problem.model, - problem=self.problem, - coords=grid.coords[i_grid:grid.n_grid, :], - coords_norm=grid.coords_norm[i_grid:grid.n_grid, :], - i_iter=self.options["order_max"], - i_subiter=self.options["interaction_order"], - fn_results=None, - print_func_time=self.options["print_func_time"], - verbose=self.options["verbose"])) - - res_all = np.vstack(res_all_list) + res_new = com.run(model=self.problem.model, + problem=self.problem, + coords=grid.coords[i_grid:grid.n_grid, :], + coords_norm=grid.coords_norm[i_grid:grid.n_grid, :], + i_iter=self.options["order_max"], + i_subiter=self.options["interaction_order"], + fn_results=None, + print_func_time=self.options["print_func_time"], + verbose=self.options["verbose"]) + + if len(res_all) > 0: + res_all = np.vstack((res_all, res_new)) + else: + res_all = res_new if i_qoi == 0 and i_grid == 0: if
self.options["qoi"] == "all": @@ -973,6 +1111,12 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid, + com=com) # crop results to considered qoi if self.options["qoi"] != "all": res = copy.deepcopy(res_all) @@ -1238,9 +1382,9 @@ def __init__(self, parameters, options, results, grid, validation=None): else: self.qoi_specific = False - if self.options["error_type"] != "loocv": - self.options["error_type"] = "loocv" - warnings.warn("Changing error calculation type to loocv ...") + # if self.options["error_type"] != "loocv": + # self.options["error_type"] = "loocv" + # warnings.warn("Changing error calculation type to loocv ...") def run(self): """ @@ -1553,12 +1697,12 @@ def run(self): coords_gradient_norm=self.grid.coords_gradient_norm, options=self.options["grid_options"]) elif self.options["grid"] == Random or self.options["grid"] == GP: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={n_grid}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(n_grid)}") grid_original = self.options["grid"](parameters_random=self.problem.parameters_random, n_grid=n_grid, options=self.options["grid_options"]) else: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={n_grid}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(n_grid)}") grid_original = LHS(parameters_random=self.problem.parameters_random, n_grid=n_grid, options={"criterion": "ese", @@ -1648,6 +1792,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid_original = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid_original, + com=com) + # crop results to considered qoi if self.options["qoi"] != "all": res = copy.deepcopy(res_all) @@ -1944,7 +2095,7 @@ def run(self): options=self.options["grid_options"]) elif self.options["grid"] == Random or self.options["grid"] == LHS or self.options["grid"] == GP: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={self.options['n_grid']}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(self.options['n_grid'])}") grid = self.options["grid"](parameters_random=self.problem.parameters_random, n_grid=self.options["n_grid"], options=self.options["grid_options"]) @@ -1972,6 +2123,8 @@ def run(self): n_qoi = len(qoi_idx) + res_all = np.array([]) + while i_qoi < n_qoi: q_idx = qoi_idx[i_qoi] print_str = "Determining gPC approximation for QOI #{}:".format(q_idx) @@ -1985,7 +2138,7 @@ def run(self): options=self.options, validation=self.validation) - eps_pre = eps + 1 + eps = self.options["eps"] + 1 # determine gpc approximation and determine error (increase grid size in case of adaptive sampling) while eps > self.options["eps"]: @@ -1996,17 +2149,20 @@ def run(self): start_time = time.time() - res_all_list.append(com.run(model=self.problem.model, - problem=self.problem, - coords=grid.coords[i_grid:grid.n_grid, 
:], - coords_norm=grid.coords_norm[i_grid:grid.n_grid, :], - i_iter=self.options["order_max"], - i_subiter=self.options["interaction_order"], - fn_results=None, - print_func_time=self.options["print_func_time"], - verbose=self.options["verbose"])) - - res_all = np.vstack(res_all_list) + res_new = com.run(model=self.problem.model, + problem=self.problem, + coords=grid.coords[i_grid:grid.n_grid, :], + coords_norm=grid.coords_norm[i_grid:grid.n_grid, :], + i_iter=self.options["order_max"], + i_subiter=self.options["interaction_order"], + fn_results=None, + print_func_time=self.options["print_func_time"], + verbose=self.options["verbose"]) + + if len(res_all) > 0: + res_all = np.vstack((res_all, res_new)) + else: + res_all = res_new if i_qoi == 0 and i_grid == 0: if self.options["qoi"] == "all": @@ -2041,6 +2197,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid, + com=com) + # crop results to considered qoi if self.options["qoi"] != "all": res = copy.deepcopy(res_all) @@ -2374,7 +2537,7 @@ def run(self): options=self.options["grid_options"]) else: n_grid_init = np.ceil(self.options["matrix_ratio"] * gpc.basis.n_basis) - print(f"Creating initial grid ({self.options['grid']}) with n_grid={n_grid_init}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(n_grid_init)}") if self.options["grid"] in [L1, L1_LHS, LHS_L1, FIM, CO]: if "n_pool" in self.options["grid_options"]: @@ -2559,13 +2722,20 @@ def run(self): distance_weight=self.options["gradient_calculation_options"]["distance_weight"], verbose=self.options["verbose"]) - if self.options["gradient_calculation"] == "FD_fwd": - gradient_idx_FD_fwd = gradient_idx - grad_res_3D_FD_fwd = grad_res_3D - iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res, grad_res_3D, gradient_idx, gpc.grid = self.check_results(results=res, + gradient_results=grad_res_3D, + gradient_results_idx=gradient_idx, + grid=gpc.grid, + com=com) + + if self.options["gradient_enhanced"] and self.options["gradient_calculation"] == "FD_fwd": + gradient_idx_FD_fwd = gradient_idx + grad_res_3D_FD_fwd = grad_res_3D + i_grid = gpc.grid.coords.shape[0] # update gpc matrix @@ -2822,7 +2992,7 @@ def run(self): options=self.options["grid_options"]) elif self.options["grid"] == Random or self.options["grid"] == LHS or self.options["grid"] == GP: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={n_grid_init}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(n_grid_init)}") grid = self.options["grid"](parameters_random=self.problem.parameters_random, n_grid=n_grid_init, options=self.options["grid_options"]) @@ -2903,6 +3073,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, +
gradient_results_idx=gradient_idx, + grid=grid, + com=com) + megpc = [0 for _ in range(n_qoi)] coeffs = [0 for _ in range(n_qoi)] @@ -3087,6 +3264,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid, + com=com) + megpc[i_qoi].grid = copy.deepcopy(grid) # update classifier @@ -3191,6 +3375,13 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results(results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid, + com=com) + i_grid = grid.n_grid # crop results to considered qoi @@ -3489,6 +3680,14 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid = self.check_results( + results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid, + com=com) + + # crop results to considered qoi if self.options["qoi"] != "all": res = copy.deepcopy(res_all) @@ -3734,6 +3933,7 @@ def run(self): solver=megpc[i_qoi].gpc[d].solver, settings=megpc[i_qoi].gpc[d].settings, verbose=self.options["verbose"]) + megpc[i_qoi].error = error[i_qoi] # save gpc object and gpc coeffs if self.options["fn_results"] is not None: @@ -3949,12 +4149,12 @@ def run(self): options=self.options["grid_options"]) elif self.options["grid"] == Random or self.options["grid"] == GP: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={self.options['n_grid_gradient']}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(self.options['n_grid_gradient'])}") grid_original = self.options["grid"](parameters_random=self.problem.parameters_random, n_grid=self.options["n_grid_gradient"], options=self.options["grid_options"]) else: - print(f"Creating initial grid ({self.options['grid']}) with n_grid={self.options['n_grid_gradient']}") + print(f"Creating initial grid ({self.options['grid'].__name__}) with n_grid={int(self.options['n_grid_gradient'])}") grid_original = LHS(parameters_random=self.problem.parameters_random, n_grid=self.options["n_grid_gradient"], options={"criterion": "ese", @@ -4008,6 +4208,14 @@ def run(self): iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', tab=0, verbose=self.options["verbose"]) + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid_original = self.check_results( + results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid_original, + com=com) + + # set qoi indices if self.options["qoi"] == "all": qoi_idx = np.arange(res_all.shape[1]) @@ -4207,80 +4415,87 @@ def run(self): i_grid = grid_original.coords.shape[0] # Determine gradient and update projection matrix in case of gradient enhanced gPC - if
self.options["gradient_enhanced"]: - start_time = time.time() - grad_res_3D_all, gradient_idx = get_gradient(model=self.problem.model, - problem=self.problem, - grid=grid_original, - results=res_all, - com=com, - method=self.options["gradient_calculation"], - gradient_results_present=grad_res_3D_all_FD_fwd, - gradient_idx_skip=gradient_idx_FD_fwd, - i_iter=basis_order[0], - i_subiter=basis_order[1], - print_func_time=self.options["print_func_time"], - dx=self.options["gradient_calculation_options"]["dx"], - distance_weight=self.options["gradient_calculation_options"]["distance_weight"], - verbose=self.options["verbose"]) - - if self.options["gradient_calculation"] == "FD_fwd": - gradient_idx_FD_fwd = gradient_idx - grad_res_3D_all_FD_fwd = grad_res_3D_all - - iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', - tab=0, verbose=self.options["verbose"]) - - # Determine projection matrix - p_matrix, p_matrix_complete = determine_projection_matrix(gradient_results=grad_res_3D_all[:, q_idx, :], - lambda_eps=self.options["lambda_eps_gradient"]) - p_matrix_norm = np.sum(np.abs(p_matrix), axis=1) + start_time = time.time() + grad_res_3D_all, gradient_idx = get_gradient(model=self.problem.model, + problem=self.problem, + grid=grid_original, + results=res_all, + com=com, + method=self.options["gradient_calculation"], + gradient_results_present=grad_res_3D_all_FD_fwd, + gradient_idx_skip=gradient_idx_FD_fwd, + i_iter=basis_order[0], + i_subiter=basis_order[1], + print_func_time=self.options["print_func_time"], + dx=self.options["gradient_calculation_options"]["dx"], + distance_weight=self.options["gradient_calculation_options"]["distance_weight"], + verbose=self.options["verbose"]) + + if self.options["gradient_calculation"] == "FD_fwd": + gradient_idx_FD_fwd = gradient_idx + grad_res_3D_all_FD_fwd = grad_res_3D_all + + iprint('Gradient evaluation: ' + str(time.time() - start_time) + ' sec', + tab=0, verbose=self.options["verbose"]) + + # check validity of results and resample in case the model could not be evaluated at some sampling points + res_all, grad_res_3D_all, gradient_idx, grid_original = self.check_results( + results=res_all, + gradient_results=grad_res_3D_all, + gradient_results_idx=gradient_idx, + grid=grid_original, + com=com) + + # Determine projection matrix + p_matrix, p_matrix_complete = determine_projection_matrix(gradient_results=grad_res_3D_all[:, q_idx, :], + lambda_eps=self.options["lambda_eps_gradient"]) + p_matrix_norm = np.sum(np.abs(p_matrix), axis=1) + + # save projection matrix in gPC object + gpc[i_qoi].p_matrix = copy.deepcopy(p_matrix) + gpc[i_qoi].p_matrix_norm = copy.deepcopy(p_matrix_norm) + + # Set up reduced gPC + dim_reduced = p_matrix.shape[0] + iprint("Dimension of reduced problem: {}".format(dim_reduced), + tab=0, verbose=self.options["verbose"]) + + # Update gPC object if dimension has changed + if dim_reduced != gpc[i_qoi].problem.dim: + parameters_reduced = OrderedDict() + + for i in range(dim_reduced): + parameters_reduced["n{}".format(i)] = Beta(pdf_shape=[1., 1.], + pdf_limits=[-1., 1.]) + + self.problem_reduced[i_qoi] = Problem(model=self.problem.model, + parameters=parameters_reduced) + + # Create reduced gPC object of order - 1 and add rest of basisfunctions + # of this subiteration afterwards + gpc[i_qoi] = Reg(problem=self.problem_reduced[i_qoi], + order=basis_order[0] * np.ones(self.problem_reduced[i_qoi].dim), + order_max=basis_order[0], + order_max_norm=self.options["order_max_norm"], + 
interaction_order=self.options["interaction_order"], + interaction_order_current=basis_order[1], + options=self.options, + validation=self.validation) + + # save original problem + gpc[i_qoi].problem_original = self.problem # save projection matrix in gPC object gpc[i_qoi].p_matrix = copy.deepcopy(p_matrix) gpc[i_qoi].p_matrix_norm = copy.deepcopy(p_matrix_norm) - # Set up reduced gPC - dim_reduced = p_matrix.shape[0] - iprint("Dimension of reduced problem: {}".format(dim_reduced), - tab=0, verbose=self.options["verbose"]) - - # Update gPC object if dimension has changed - if dim_reduced != gpc[i_qoi].problem.dim: - parameters_reduced = OrderedDict() - - for i in range(dim_reduced): - parameters_reduced["n{}".format(i)] = Beta(pdf_shape=[1., 1.], - pdf_limits=[-1., 1.]) - - self.problem_reduced[i_qoi] = Problem(model=self.problem.model, - parameters=parameters_reduced) - - # Create reduced gPC object of order - 1 and add rest of basisfunctions - # of this subiteration afterwards - gpc[i_qoi] = Reg(problem=self.problem_reduced[i_qoi], - order=basis_order[0] * np.ones(self.problem_reduced[i_qoi].dim), - order_max=basis_order[0], - order_max_norm=self.options["order_max_norm"], - interaction_order=self.options["interaction_order"], - interaction_order_current=basis_order[1], - options=self.options, - validation=self.validation) - - # save original problem - gpc[i_qoi].problem_original = self.problem - - # save projection matrix in gPC object - gpc[i_qoi].p_matrix = copy.deepcopy(p_matrix) - gpc[i_qoi].p_matrix_norm = copy.deepcopy(p_matrix_norm) - - # Save settings and options in gpc object - gpc[i_qoi].solver = self.options["solver"] - gpc[i_qoi].settings = self.options["settings"] - gpc[i_qoi].options = copy.deepcopy(self.options) - gpc[i_qoi].error = error - gpc[i_qoi].relative_error_nrmsd = nrmsd - gpc[i_qoi].relative_error_loocv = loocv + # Save settings and options in gpc object + gpc[i_qoi].solver = self.options["solver"] + gpc[i_qoi].settings = self.options["settings"] + gpc[i_qoi].options = copy.deepcopy(self.options) + gpc[i_qoi].error = error + gpc[i_qoi].relative_error_nrmsd = nrmsd + gpc[i_qoi].relative_error_loocv = loocv # assign transformed grid gpc[i_qoi].grid = project_grid(grid=grid_original, p_matrix=p_matrix, mode="reduce") diff --git a/pygpc/GPC.py b/pygpc/GPC.py index fcf1e61a..b1ba17ae 100755 --- a/pygpc/GPC.py +++ b/pygpc/GPC.py @@ -471,9 +471,12 @@ def validate(self, coeffs, results=None, gradient_results=None, qoi_idx=None): error_norm=self.options["error_norm"])) self.error.append(self.relative_error_loocv[-1]) + elif self.options["error_type"] is None: + self.error.append(None) + return self.error[-1] - def get_pdf(self, coeffs, n_samples, output_idx=None): + def get_pdf(self, coeffs, n_samples, output_idx=None, filter=True, return_samples=False): """ Determine the estimated pdfs of the output quantities pdf_x, pdf_y = SGPC.get_pdf(coeffs, n_samples, output_idx=None) @@ -486,6 +489,10 @@ def get_pdf(self, coeffs, n_samples, output_idx=None): Number of samples used to estimate output pdfs output_idx: ndarray, optional, default=None [1 x n_out] Index of output quantities to consider (if output_idx=None, all output quantities are considered) + filter : bool, optional, default: True + Use savgol_filter to smooth probability density + return_samples : bool, optional, default: False + Additionally returns input and output samples with which the pdfs were computed Returns ------- pdf_x: ndarray of float [100 x n_out] x-coordinates of
output pdfs of output quantities pdf_y: ndarray of float [100 x n_out] y-coordinates of output pdfs (probability density of output quantity) + samples_in : ndarray of float [n_samples x dim] (optional) + Input samples (if return_samples=True) + samples_out : ndarray of float [n_samples x n_out] (optional) + Output samples (if return_samples=True) """ # handle (N,) arrays @@ -501,7 +512,7 @@ def get_pdf(self, coeffs, n_samples, output_idx=None): # if output index array is not provided, determine pdfs of all outputs if output_idx is None: - output_idx = np.linspace(0, coeffs.shape[1]) + output_idx = np.arange(0, coeffs.shape[1]) output_idx = output_idx[np.newaxis, :] n_out = len(output_idx) @@ -518,13 +529,17 @@ def get_pdf(self, coeffs, n_samples, output_idx=None): pdf_y[:, i_out], tmp = np.histogram(samples_out[:, i_out], bins=100, density=True) pdf_x[:, i_out] = (tmp[1:] + tmp[0:-1]) / 2. - pdf_y[:, i_out] = savgol_filter(pdf_y[:, i_out], 51, 5) + if filter: + pdf_y[:, i_out] = savgol_filter(pdf_y[:, i_out], 51, 5) # kde = scipy.stats.gaussian_kde(samples_out[:, i_out], bw_method=0.1 / samples_out[:, i_out].std(ddof=1)) # pdf_y[:, i_out] = kde(pdf_x[:, i_out]) # pdf_x[:, i_out] = np.linspace(samples_out[:, i_out].min(), samples_out[:, i_out].max(), 100) - return pdf_x, pdf_y + if return_samples: + return pdf_x, pdf_y, samples_in, samples_out + else: + return pdf_x, pdf_y def get_samples(self, coeffs, n_samples, output_idx=None): """ diff --git a/pygpc/Grid.py b/pygpc/Grid.py index 64de3e93..05984075 100755 --- a/pygpc/Grid.py +++ b/pygpc/Grid.py @@ -106,6 +106,9 @@ def coords(self): @coords.setter def coords(self, value): + if value.ndim == 1: + value = value[np.newaxis, :] + self._coords = value if value is not None: @@ -121,6 +124,9 @@ def coords_norm(self): @coords_norm.setter def coords_norm(self, value): + if value.ndim == 1: + value = value[np.newaxis, :] + self._coords_norm = value if value is not None: @@ -136,6 +142,8 @@ def coords_gradient(self): @coords_gradient.setter def coords_gradient(self, value): + assert value.ndim == 3, "Specify coords_gradient as 3D tensor of shape [n_grid x dim x dim]" + self._coords_gradient = value if value is not None: @@ -151,6 +159,7 @@ def coords_gradient_norm(self): @coords_gradient_norm.setter def coords_gradient_norm(self, value): + assert value.ndim == 3, "Specify coords_gradient_norm as 3D tensor of shape [n_grid x dim x dim]" self._coords_gradient_norm = value if value is not None: @@ -901,6 +910,83 @@ def __init__(self, parameters_random, n_grid=None, options=None, coords=None, co # Seed of random grid (if necessary to reproduce random grid) np.random.seed(self.seed) + def resample(self, idx, classifier=None, domain=None, gradient=False, results=None, type=None): + """ + Replace grid points specified by index. Modifies grid object in place. + + Parameters + ---------- + idx : np.ndarray of int [n_grid_points_replace] + Indices of grid points to replace by resampling. + classifier : Classifier object, optional, default: None + Classifier + domain : int, optional, default: None + Adds grid points only in specified domain (needs Classifier object including a predict() method) + gradient : bool, optional, default: False + Add corresponding gradient grid points + results : np.ndarray of float [n_grid x n_qoi] + Results computed so far before adding additional grid points + type : str, optional, default: None + Type of adding new grid points + - "GP": Gaussian process regression (points are added where the uncertainty of sampling is very high). 
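As a usage illustration of the resample() and delete() methods introduced here, a minimal sketch; the problem definition, grid size, seed, and failure indices are assumptions for the example, not part of the patch:

import numpy as np
import pygpc
from collections import OrderedDict

# hypothetical 2D problem
parameters = OrderedDict()
parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-1, 1])
parameters["x2"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-1, 1])

grid = pygpc.Random(parameters_random=parameters, n_grid=100, options={"seed": 1})

# suppose the model could not be evaluated at these sampling points
idx_failed = np.array([3, 17])

# draw fresh random points in place of the failed ones (in place) ...
grid.resample(idx=idx_failed)

# ... or drop the failed points from the grid entirely (in place)
grid.delete(idx=idx_failed)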
+ Only works for Random, LHS, and GP grids.
+ - None: grid points are added according to the grid type.
+ """
+ # create temporary grid
+ grid_tmp = copy.deepcopy(self)
+ n_grid = self.n_grid
+
+ # extend grid by number of grid points to resample
+ grid_tmp.extend_random_grid(n_grid_new=self.n_grid + len(idx),
+ classifier=classifier,
+ domain=domain,
+ gradient=gradient,
+ results=results,
+ type=type)
+
+ # copy options (seed increment)
+ self.options = copy.deepcopy(grid_tmp.options)
+
+ # overwrite grid points to resample in original grid with new grid points from temporary grid
+ self.coords[idx, ] = grid_tmp.coords[n_grid:, ]
+ self.coords_norm[idx, ] = grid_tmp.coords_norm[n_grid:, ]
+
+ # generate and replace unique IDs of new grid points
+ for _idx in idx:
+ self.coords_id[_idx] = uuid.uuid4()
+
+ # create new gradient grid points if required
+ if self.coords_gradient is not None:
+ self._coords_gradient = None
+ self._coords_gradient_norm = None
+ self.n_grid_gradient = None
+ self.create_gradient_grid()
+
+ def delete(self, idx):
+ """
+ Delete grid points by index. Modifies grid object in place.
+
+ Parameters
+ ----------
+ idx : int or np.ndarray of int
+ Indices of grid points to delete.
+ """
+
+ if type(idx) in [int, float, np.ndarray, list]:
+ idx = np.array(idx)
+
+ # delete grid points
+ self.coords = np.delete(self.coords, idx, axis=0)
+ self.coords_norm = np.delete(self.coords_norm, idx, axis=0)
+
+ # remove unique IDs of grid points
+ self.coords_id = [self.coords_id[i] for i in range(self.n_grid) if i not in idx]
+
+ # delete gradient grid points
+ if self.coords_gradient is not None:
+ self.coords_gradient = np.delete(self.coords_gradient, idx, axis=0)
+ self.coords_gradient_norm = np.delete(self.coords_gradient_norm, idx, axis=0)
+
def extend_random_grid(self, n_grid_new=None, coords=None, coords_norm=None, classifier=None, domain=None, gradient=False, results=None, type=None):
"""
@@ -959,13 +1045,20 @@ def extend_random_grid(self, n_grid_new=None, coords=None, coords_norm=None, cla
self.coords_norm = np.vstack([self.coords_norm, new_grid.coords_norm])
elif type == "GP":
+ if "n_pool" not in list(self.options.keys()):
+ self.options["n_pool"] = 1000
+
# If results are given, determine optimal hyperparameters for Gaussian Process Regression
if results is not None:
self.options["lengthscale"], self.options["variance"] = \
get_parameters_gaussian_process(Xtrain=self.coords_norm,
ytrain=results[:, 0])
- tqdm.write(f"Adding GP grid points #{self.n_grid + 1} ... #{n_grid_new}")
+ if (self.n_grid + 1) == n_grid_new:
+ tqdm.write(f"Adding GP grid point #{self.n_grid + 1}")
+ else:
+ tqdm.write(f"Adding GP grid points #{self.n_grid + 1} ... #{n_grid_new}")
+
for _ in tqdm(range(n_grid_add)):
# Determine new grid points where uncertainty of output is highest
new_grid = self.get_coords_gaussian_process(n_grid_add=1,
@@ -992,13 +1085,20 @@ def extend_random_grid(self, n_grid_new=None, coords=None, coords_norm=None, cla
self.coords_norm = np.vstack([self.coords_norm, new_grid.coords_norm])
elif type == "GP":
+ if "n_pool" not in list(self.options.keys()):
+ self.options["n_pool"] = 1000
+
# If results are given, determine optimal hyperparameters for Gaussian Process Regression
if results is not None:
self.options["lengthscale"], self.options["variance"] = \
get_parameters_gaussian_process(Xtrain=self.coords_norm,
ytrain=results[:, 0])
- print(f"Adding GP grid points #{self.n_grid + 1} ...
#{n_grid_new})") + if (self.n_grid + 1) == n_grid_new: + tqdm.write(f"Adding GP grid point #{self.n_grid + 1}") + else: + tqdm.write(f"Adding GP grid points #{self.n_grid + 1} ... #{n_grid_new}") + for _ in tqdm(range(n_grid_add)): # Determine new grid points where uncertainty of output is highest new_grid = self.get_coords_gaussian_process(n_grid_add=1, @@ -1059,7 +1159,13 @@ def extend_random_grid(self, n_grid_new=None, coords=None, coords_norm=None, cla self.coords = np.vstack([self.coords, coords]) self.coords_norm = np.vstack([self.coords_norm, coords_norm]) - elif coords is not None and coords_norm is not None: + elif coords is not None or coords_norm is not None: + # append points to existing grid + if coords_norm is None and coords is not None: + coords_norm = self.get_normalized_coordinates(coords=coords) + if coords is None and coords_norm is not None: + coords = self.get_denormalized_coordinates(coords_norm=coords_norm) + # Number of new grid points n_grid_add = coords.shape[0] @@ -1068,12 +1174,11 @@ def extend_random_grid(self, n_grid_new=None, coords=None, coords_norm=None, cla if not (classifier.predict(coords_norm) == domain).all(): raise AssertionError("Specified coordinates are not lying in right domain!") - # append points to existing grid self.coords = np.vstack([self.coords, coords]) self.coords_norm = np.vstack([self.coords_norm, coords_norm]) else: - raise ValueError("Specify either n_grid_new or coords and coords_norm") + raise ValueError("Specify either n_grid_new or coords or coords_norm") # Generate and append unique IDs of new grid points self.coords_id = self.coords_id + [uuid.uuid4() for _ in range(n_grid_add)] @@ -1195,28 +1300,33 @@ def get_coords_gaussian_process(self, n_grid_add, lengthscale=0.2, variance=1., covariance = Kss - (v.T @ v) # we get the standard deviation from the covariance matrix - try: - std = np.sqrt(np.diag(covariance)) - except RuntimeWarning: - pass + std = np.sqrt(np.diag(covariance)) - # weight std with joint probability - joint_pdf = np.ones(n_pool) - for i_p, p in enumerate(self.parameters_random): - _, tmp = self.parameters_random[p].pdf_norm(x=grid_test.coords_norm[:, i_p]) - joint_pdf *= tmp - std_weighted = std * joint_pdf + if np.isnan(std).all(): + print("Warning: GP failed, adding random grid points instead.") + + # take random samples if GP failed + coords = grid_test.coords[:n_grid_add, :] + coords_norm = grid_test.coords_norm[:n_grid_add, :] + else: + # weight std with joint probability + joint_pdf = np.ones(n_pool) + for i_p, p in enumerate(self.parameters_random): + _, tmp = self.parameters_random[p].pdf_norm(x=grid_test.coords_norm[:, i_p]) + joint_pdf *= tmp + std_weighted = std * joint_pdf - idx_sorted = np.flip(np.argsort(std_weighted)) - idx_sorted = idx_sorted[~np.isnan(std_weighted[idx_sorted])] + idx_sorted = np.flip(np.argsort(std_weighted)) + idx_sorted = idx_sorted[~np.isnan(std_weighted[idx_sorted])] - coords = grid_test.coords[idx_sorted[:n_grid_add], :] - coords_norm = grid_test.coords_norm[idx_sorted[:n_grid_add], :] + coords = grid_test.coords[idx_sorted[:n_grid_add], :] + coords_norm = grid_test.coords_norm[idx_sorted[:n_grid_add], :] grid_new = Random(coords=coords, coords_norm=coords_norm, parameters_random=self.parameters_random) return grid_new + class Random(RandomGrid): """ Random grid object diff --git a/pygpc/SGPC.py b/pygpc/SGPC.py index bd5c1f6e..958c78c0 100755 --- a/pygpc/SGPC.py +++ b/pygpc/SGPC.py @@ -143,7 +143,7 @@ def get_mean(coeffs=None, samples=None): else: raise 
AssertionError("Provide either ""coeffs"" or ""samples"" to determine mean!") - mean = mean[np.newaxis, :] + # mean = mean[np.newaxis, :] return mean @@ -175,7 +175,7 @@ def get_std(coeffs=None, samples=None): else: raise AssertionError("Provide either ""coeffs"" or ""samples"" to determine standard deviation!") - std = std[np.newaxis, :] + # std = std[np.newaxis, :] return std diff --git a/pygpc/Visualization.py b/pygpc/Visualization.py index 17e0a1ce..28bc2b36 100755 --- a/pygpc/Visualization.py +++ b/pygpc/Visualization.py @@ -465,14 +465,14 @@ def plot_beta_pdf_fit(data, a_beta, b_beta, p_beta, q_beta, a_uni=None, b_uni=No plt.figure(1) plt.clf() - plt.rc('text', usetex=True) + plt.rc('text', usetex=False) plt.rc('font', size=18) ax = plt.gca() # legendtext = [r"e-pdf", r"$\beta$-pdf"] - legendtext = [r"$\beta$-pdf"] + legendtext = ["$\\beta$-pdf"] # plot histogram of data - n, bins, patches = plt.hist(data, bins=16, normed=1, color=[1, 1, 0.6], alpha=0.5) + n, bins, patches = plt.hist(data, bins=16, density=1, color=[1, 1, 0.6], alpha=0.5) # plot beta pdf (kernel density estimate) # plt.plot(kde_x, kde_y, 'r--', linewidth=2) diff --git a/pygpc/io.py b/pygpc/io.py index b23fd313..4204f0a6 100755 --- a/pygpc/io.py +++ b/pygpc/io.py @@ -98,7 +98,7 @@ def write_session_hdf5(obj, fname, folder="session", overwrite=True): write_dict_to_hdf5(fn_hdf5=fname, data=obj.__dict__, folder=folder) -def read_session(fname, folder="session"): +def read_session(fname, folder=None): """ Reads a gpc session in pickle or hdf5 file formal depending on the file extension in fname (.pkl or .hdf5) @@ -107,7 +107,7 @@ def read_session(fname, folder="session"): ---------- fname : str path to input file - folder : str, optional, default: "session" + folder : str, optional, default: None Path in .hdf5 file Returns @@ -176,7 +176,10 @@ def read_session_hdf5(fname, folder="session", verbose=False): from .RandomParameter import RandomParameter # model - model = read_model_from_hdf5(fn_hdf5=fname, folder=folder + "/model", verbose=verbose) + try: + model = read_model_from_hdf5(fn_hdf5=fname, folder=folder + "/model", verbose=verbose) + except KeyError: + model = None # parameters parameters_unsorted = read_parameters_from_hdf5(fn_hdf5=fname, folder=folder + "/problem/parameters", diff --git a/pygpc/misc.py b/pygpc/misc.py index 23720483..458a2bae 100755 --- a/pygpc/misc.py +++ b/pygpc/misc.py @@ -364,11 +364,13 @@ def get_beta_pdf_fit(data, beta_tolerance=0, uni_interval=0, fn_plot=None): ---------- data: ndarray of float Data to fit beta distribution on - beta_tolerance: float, optional, default=0 - Tolerance interval to calculate the bounds of beta distribution - from observed data, e.g. 0.2 (+-20% tolerance on observed max and min value) - uni_interval: float, optional, default=0 - uniform distribution interval defined as fraction of beta distribution interval (e.g. 0.95 (95%)) + beta_tolerance: float or list of float, optional, default=0 + Specifies bounds of beta distribution. If float, it calculates the tolerance + from observed data, e.g. 0.2 (+-20% tolerance on observed max and min value). + If list, it takes [min, max] as bounds [a, b]. + uni_interval: float or list of float, optional, default=0 + uniform distribution interval. If float, the bounds are defined as a fraction of the beta distribution + interval (e.g. 0.95 (95%)). If list, it takes [min, max] as bounds [a, b]. 
fn_plot: str
Filename of plot to save (.pdf and .png)
@@ -388,7 +390,11 @@ def get_beta_pdf_fit(data, beta_tolerance=0, uni_interval=0, fn_plot=None):
data_std = np.std(data)
# fit beta distribution to data
- if beta_tolerance > 0:
+ if type(beta_tolerance) is list:
+ a_beta = beta_tolerance[0]
+ b_beta = beta_tolerance[1]
+ p_beta, q_beta, a_beta, ab_beta = scipy.stats.beta.fit(data, floc=a_beta, fscale=b_beta - a_beta)
+ elif beta_tolerance > 0:
# use user beta_tolerance to set limits of distribution
data_range = data.max()-data.min()
a_beta = data.min()-beta_tolerance*data_range
@@ -411,7 +417,11 @@ def get_beta_pdf_fit(data, beta_tolerance=0, uni_interval=0, fn_plot=None):
# determine limits of uniform distribution [a_uni, b_uni] covering the
# interval uni_interval of the beta distribution
- if uni_interval > 0:
+ if type(uni_interval) is list:
+ a_uni = uni_interval[0]
+ b_uni = uni_interval[1]
+ uni_parameters = np.array([a_uni, b_uni])
+ elif uni_interval > 0:
a_uni = scipy.stats.beta.ppf((1 - uni_interval) / 2, p_beta, q_beta, loc=a_beta, scale=b_beta - a_beta)
b_uni = scipy.stats.beta.ppf((1 + uni_interval) / 2, p_beta, q_beta, loc=a_beta, scale=b_beta - a_beta)
uni_parameters = np.array([a_uni, b_uni])
diff --git a/pygpc/postprocessing.py b/pygpc/postprocessing.py
index 1f12ca04..d76cd47e 100755
--- a/pygpc/postprocessing.py
+++ b/pygpc/postprocessing.py
@@ -1,4 +1,5 @@
import h5py
+import matplotlib
import numpy as np
import pandas as pd
@@ -621,13 +622,18 @@ def get_sens_summary(fn_gpc, parameters_random, fn_out=None):
return sobol, gsens
-def plot_sens_summary(sobol, gsens, multiple_qoi=False, qois=None, results=None,
- y_label="y", x_label="x", sobol_donut=True):
+def plot_sens_summary(sobol, gsens, session=None, coeffs=None, qois=None, mean=None, std=None, output_idx=None,
+ y_label="y", x_label="x", zlim=None, sobol_donut=True, plot_pdf_over_output_idx=False,
+ fn_plot=None):
"""
Plot summary of Sobol indices and global derivative based sensitivity coefficients
Parameters
----------
+ session : GPC Session object instance
+ GPC session object containing all information i.e., gPC, Problem, Model, Grid, Basis, RandomParameter instances
+ coeffs : ndarray of float [n_coeffs x n_out] or list of ndarray of float [n_qoi][n_coeffs x n_out]
+ GPC coefficients
sobol : pandas DataFrame
Pandas DataFrame containing the normalized Sobol indices from get_sens_summary()
gsens: pandas DataFrame
@@ -637,15 +643,38 @@ def plot_sens_summary(sobol, gsens, multiple_qoi=False, qois=None, results=None,
- multiple_qoi: Boolean
- Option to plot over a quantity of interest, needs an array of qoi values and results
qois: numpy ndarray
- Quantities of interest
- results: numpy ndarray
- Results from gpc session
+ Axis of quantities of interest (x-axis, e.g. time)
+ mean: numpy ndarray
+ Mean from gpc session (determined with e.g.: pygpc.SGPC.get_mean(coeffs))
+ std: numpy ndarray
+ Std from gpc session (determined with e.g.: pygpc.SGPC.get_std(coeffs))
+ (can be given and plotted when plot_pdf_over_output_idx=False)
+ output_idx : int, str or None, optional, default=0
+ Indices of output quantity to consider
+ x_label : str
+ Label of x-axis in case of multiple QOI plots
+ y_label : str
+ Label of y-axis in case of multiple QOI plots
+ zlim : list of float, optional, default: None
+ Limits of 3D plot (e.g. pdf) in z direction
+ plot_pdf_over_output_idx : bool, optional, default: False
+ Plots pdf as a surface plot over output index (e.g.
a time axis) + fn_plot : str, optional, default: None + Filename of the plot to save (.png or .pdf) """ import matplotlib.pyplot as plt - glob_sens = gsens.values.flatten() + if type(output_idx) is int: + output_idx = [output_idx] + + if output_idx is None or output_idx == "all": + if coeffs is not None: + output_idx = np.arange(coeffs.shape[1]) + else: + output_idx = np.array([0]) + + glob_sens = gsens.values[:, output_idx].flatten() gsens_keys = gsens["global_sens (qoi 0)"].keys() - sobols = sobol.values.flatten() + + sobols = sobol.values[:, output_idx].flatten() sobol_keys = sobol["sobol_norm (qoi 0)"].keys() # ignore very low Sobol indices @@ -657,7 +688,7 @@ def plot_sens_summary(sobol, gsens, multiple_qoi=False, qois=None, results=None, sobols = sobols[mask] sobol_labels = [s for i, s in enumerate(sobol_labels) if mask[i]] - if multiple_qoi == False: + if len(output_idx) == 1: fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(7, 7)) if sobol_donut: wedgeprops = {"linewidth": 0.5, 'width': 0.5, "edgecolor": "k"} @@ -711,35 +742,337 @@ def plot_sens_summary(sobol, gsens, multiple_qoi=False, qois=None, results=None, ax2.set_xlabel('parameter', fontsize=14) ax2.axhline(y=0, color='k', alpha=0.5) plt.tight_layout() - plt.show() + else: - if not (type(qois) == np.ndarray and type(results) == np.ndarray): - raise ValueError("Please specifiy qois and results as a numpy array of values!") - else: - fig, (ax1, ax2) = plt.subplots(2, 1, figsize=[8, 6]) - for i in range(sobol.values.shape[0]): - ax1.plot(qois, sobol.values[i]) - ax1.set_title("Sobol indices of the parameters over the qois", fontsize=14) + fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=[8, 9]) + + # Estimate output pdf + if plot_pdf_over_output_idx: + if session.qoi_specific: + pdf_x = np.zeros((100, len(output_idx))) + pdf_y = np.zeros((100, len(output_idx))) + + for i, o_idx in enumerate(output_idx): + pdf_x_tmp, pdf_y_tmp = session.gpc[o_idx].get_pdf(coeffs=coeffs[o_idx], n_samples=1e5, output_idx=0) + pdf_x[:, i] = pdf_x_tmp.flatten() + pdf_y[:, i] = pdf_y_tmp.flatten() + else: + pdf_x, pdf_y, _, y_gpc_samples = session.gpc[0].get_pdf(coeffs=coeffs, + n_samples=1e5, + output_idx=output_idx, + return_samples=True) + + # interpolate pdf data on common grid + x_interp = np.linspace(0, np.max(pdf_x), 1000) + y_interp = np.zeros((len(x_interp), np.shape(pdf_x)[1])) + + for i in range(np.shape(pdf_x)[1]): + y_interp[:, i] = np.interp(x_interp, pdf_x[:, i], pdf_y[:, i], left=0, right=0) + + if zlim is not None: + vmin = zlim[0] + vmax = zlim[1] + else: + vmin = np.min(y_interp) + vmax = np.max(y_interp) + + if qois is not None: + x_axis = qois + else: + x_axis = np.arange(0, len(output_idx)) + + xx, yy = np.meshgrid(x_axis, x_interp) + + # plot pdf over output_idx + ax1.pcolor(xx, yy, y_interp, cmap="bone_r", vmin=vmin, vmax=vmax) + ax1.plot(x_axis, mean, "r", linewidth=1.5) + legend_elements = [matplotlib.lines.Line2D([0], [0], color='r', lw=2, label='mean'), + matplotlib.patches.Patch(facecolor='grey', edgecolor='k', label='pdf')] + ax1.legend(handles=legend_elements) + + ax1.grid() + + if x_label is not None: ax1.set_xlabel(x_label, fontsize=14) - ax1.set_ylabel("Sobol index", fontsize=14) - ax1.set_yscale('log') - sobol_labels = [(x[1:-1].replace("'", " ")).replace(" ,", ",") for x in sobol_keys] - ax1.legend(sobol_labels) - # ax1.legend(sobol['sobol_norm (qoi 0)'].keys()) - ax1.set_xlim(qois[0], qois[-1] + (np.max(qois[-1]) * 1e-3)) + + if y_label is not None: + ax1.set_ylabel(y_label, fontsize=14) + + else: + # mean and std + 
ax1.plot(qois, mean)
ax1.grid()
+ ax1.set_ylabel(y_label, fontsize=14)
+ ax1.set_xlabel(x_label, fontsize=14)
+ ax1.legend(["mean of " + y_label], loc='upper left')
+ ax1.set_xlim(qois[0], qois[-1] + (np.max(qois[-1]) * 1e-3))
+ ax1.set_title("Mean and standard deviation of " + y_label, fontsize=14)
+ # ax2.set_ylim(np.min(results) + np.max(std_results), np.max(results) + np.max(std_results))
+ ax1.fill_between(qois, mean - std, mean + std, color="grey", alpha=0.5)
- # Plot mean and std of the model
- mean_results = np.mean(results, axis=0)
- std_results = np.std(results, axis=0)
- ax2.plot(qois, mean_results)
- ax2.grid()
- ax2.set_ylabel(y_label, fontsize=14)
+ # sobol
+ for i in range(sobol.values.shape[0]):
+ ax2.plot(qois, sobol.values[i])
+ ax2.set_title("Sobol indices of the parameters over the qois", fontsize=14)
ax2.set_xlabel(x_label, fontsize=14)
- ax2.legend(["mean of " + y_label], loc='upper left')
- ax2.set_xlim(qois[0], qois[-1] + (np.max(qois[-1]) * 1e-3))
- ax2.set_title("Mean and standard deviation of " + y_label, fontsize=14)
- # ax2.set_ylim(np.min(results) + np.max(std_results), np.max(results) + np.max(std_results))
- ax2.fill_between(qois, mean_results - std_results, mean_results + std_results, color="grey", alpha=0.5)
+ ax2.set_ylabel("Sobol index", fontsize=14)
+ ax2.set_yscale('log')
+ sobol_labels = [(x[1:-1].replace("'", " ")).replace(" ,", ",") for x in sobol_keys]
+ ax2.legend(sobol_labels)
+ # ax1.legend(sobol['sobol_norm (qoi 0)'].keys())
+ ax2.set_xlim(qois[0], qois[-1] + (np.max(qois[-1]) * 1e-3))
+ ax2.grid()
+
+ # gsens
+ for i in range(gsens.values.shape[0]):
+ ax3.plot(qois, gsens.values[i])
+ ax3.set_title("Global derivatives of the parameters over the qois", fontsize=14)
+ ax3.set_xlabel(x_label, fontsize=14)
+ ax3.set_ylabel("Global sensitivity", fontsize=14)
+ # ax3.set_yscale('log')
+ gsens_labels = [x for x in gsens_keys]
+ ax3.legend(gsens_labels)
+ # ax1.legend(sobol['sobol_norm (qoi 0)'].keys())
+ ax3.set_xlim(qois[0], qois[-1] + (np.max(qois[-1]) * 1e-3))
+ ax3.grid()
+ plt.tight_layout()
+
+ if fn_plot is not None:
+ plt.savefig(fn_plot, dpi=600)
+ plt.close()
+
+
+def plot_gpc(session, coeffs, random_vars=None, coords=None, results=None, n_grid=None, output_idx=0, fn_plot=None,
+ camera_pos=None, zlim=None, plot_pdf_over_output_idx=False, qois=None, x_label=None, y_label=None):
+ """
+ Compares gPC approximation with original model function. Evaluates both at n_grid (x n_grid) sampling points and
+ calculates the difference between the two solutions at the output quantity with output_idx and saves the plot as
+ *_QOI_idx_.png/pdf. Also generates one .hdf5 results file with the evaluation results.
+
+ Parameters
+ ----------
+ session : GPC Session object instance
+ GPC session object containing all information i.e., gPC, Problem, Model, Grid, Basis, RandomParameter instances
+ coeffs : ndarray of float [n_coeffs x n_out] or list of ndarray of float [n_qoi][n_coeffs x n_out]
+ GPC coefficients
+ random_vars: str or list of str [2]
+ Names of the random variables, the analysis is performed for one or at most two random variables
+ n_grid : int or list of int [2], optional
+ Number of samples in each dimension to compare the gPC approximation with the original model function.
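A sketch of how the relocated plot_gpc might be called in its new pdf-over-output-index mode; session and coeffs are assumed to come from a previously completed gPC run, and the QOI axis is illustrative:

import numpy as np
import pygpc

# continuing from a completed run (assumption):
# session, coeffs, results = session.run()
t = np.arange(0.0, 1.0, 1e-2)  # QOI axis, e.g. time (assumption)

pygpc.plot_gpc(session=session, coeffs=coeffs, output_idx="all",
               plot_pdf_over_output_idx=True, qois=t,
               x_label="t", y_label="y(t)")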
+ A cartesian grid is generated based on the limits of the specified random_vars
+ coords : ndarray of float [n_coords x n_dim]
+ Parameter combinations for the random_vars the comparison is conducted with
+ output_idx : int, str or None, optional, default=0
+ Indices of output quantity to consider
+ results: ndarray of float [n_coords x n_out]
+ If available, data of original model function at grid, containing all QOIs
+ fn_plot : str, optional, default: None
+ Filename of plot comparing original vs gPC model (*.png or *.pdf)
+ camera_pos : list [2], optional, default: None
+ Camera position of 3D surface plot (for 2 random variables only) [azimuth, elevation]
+ zlim : list of float, optional, default: None
+ Limits of 3D plot (e.g. pdf) in z direction
+ plot_pdf_over_output_idx : bool, optional, default: False
+ Plots pdf as a surface plot over output index (e.g. a time axis)
+ qois: numpy ndarray
+ Axis of quantities of interest (x-axis, e.g. time)
+ x_label : str
+ Label of x-axis in case of multiple QOI plots
+ y_label : str
+ Label of y-axis in case of multiple QOI plots
+
+ Returns
+ -------
+ : .hdf5 file
+ Data file containing the grid points and the results of the original and the gpc approximation
+ : .png and .pdf file
+ Plot comparing original vs gPC model
+ """
+ y_orig = None
+
+ if type(output_idx) is int:
+ output_idx = [output_idx]
+
+ if output_idx is None or output_idx == "all":
+ output_idx = np.arange(coeffs.shape[1])
+
+ if random_vars is not None:
+ if type(random_vars) is not list:
+ random_vars = list(np.atleast_1d(random_vars))
+ assert len(random_vars) <= 2
+
+ if n_grid is not None:
+ if type(n_grid) is not list:
+ n_grid = list(np.atleast_1d(n_grid))
+ else:
+ n_grid = [10, 10]
+
+ if random_vars is not None:
+ # Create grid such that it includes the mean values of other random variables
+ grid = np.zeros((np.prod(n_grid), len(session.parameters_random)))
+
+ idx = []
+ idx_global = []
+
+ # sort random_vars according to gpc.parameters
+ for i_p, p in enumerate(session.parameters_random.keys()):
+ if p not in random_vars:
+ grid[:, i_p] = session.parameters_random[p].mean
+
+ else:
+ idx.append(random_vars.index(p))
+ idx_global.append(i_p)
+
+ random_vars = [random_vars[i] for i in idx]
+ x = []
+
+ for i_p, p in enumerate(random_vars):
+ x.append(np.linspace(session.parameters_random[p].pdf_limits[0],
+ session.parameters_random[p].pdf_limits[1],
+ n_grid[i_p]))
+
+ coords_gpc = get_cartesian_product(x)
+ if len(random_vars) == 2:
+ x1_2d, x2_2d = np.meshgrid(x[0], x[1])
+
+ grid[:, idx_global] = coords_gpc
+
+ # Normalize grid
+ grid_norm = Grid(parameters_random=session.parameters_random).get_normalized_coordinates(grid)
+
+ # Evaluate gPC expansion on grid and estimate output pdf
+ if session.qoi_specific:
+ y_gpc = np.zeros((grid_norm.shape[0], len(output_idx)))
+ pdf_x = np.zeros((100, len(output_idx)))
+ pdf_y = np.zeros((100, len(output_idx)))
+
+ for i, o_idx in enumerate(output_idx):
+ y_gpc[:, i] = session.gpc[o_idx].get_approximation(coeffs=coeffs[o_idx], x=grid_norm,
+ output_idx=0).flatten()
+
+ pdf_x_tmp, pdf_y_tmp = session.gpc[o_idx].get_pdf(coeffs=coeffs[o_idx], n_samples=1e5, output_idx=0)
+ pdf_x[:, i] = pdf_x_tmp.flatten()
+ pdf_y[:, i] = pdf_y_tmp.flatten()
+ else:
+ if not plot_pdf_over_output_idx:
+ y_gpc = session.gpc[0].get_approximation(coeffs=coeffs,
+ x=grid_norm,
+ output_idx=output_idx)
+
+ pdf_x, pdf_y, _, y_gpc_samples = session.gpc[0].get_pdf(coeffs=coeffs,
+ n_samples=1e5,
+ output_idx=output_idx,
+ return_samples=True)
+
+ if not plot_pdf_over_output_idx:
+ if results is not None:
+ y_orig = results[:, output_idx]
+
+ if y_orig.ndim == 1:
+ y_orig = y_orig[:, np.newaxis]
+
+ # add axes if necessary
+ if y_gpc.ndim == 1:
+ y_gpc = y_gpc[:, np.newaxis]
+
+ # Plot results
+ matplotlib.rc('text', usetex=False)
+ matplotlib.rc('xtick', labelsize=13)
+ matplotlib.rc('ytick', labelsize=13)
+ fs = 14
+
+ if plot_pdf_over_output_idx:
+ # interpolate pdf data on common grid
+ x_interp = np.linspace(0, np.max(pdf_x), 1000)
+ y_interp = np.zeros((len(x_interp), np.shape(pdf_x)[1]))
+
+ for i in range(np.shape(pdf_x)[1]):
+ y_interp[:, i] = np.interp(x_interp, pdf_x[:, i], pdf_y[:, i], left=0, right=0)
+
+ if zlim is not None:
+ vmin = zlim[0]
+ vmax = zlim[1]
+ else:
+ vmin = np.min(y_interp)
+ vmax = np.max(y_interp)
+
+ if qois is not None:
+ x_axis = qois
+ else:
+ x_axis = np.arange(0, len(output_idx))
+
+ xx, yy = np.meshgrid(x_axis, x_interp)
+
+ # plot pdf over output_idx
+ plt.figure(figsize=[10, 6])
+ plt.pcolor(xx, yy, y_interp, cmap="bone_r", vmin=vmin, vmax=vmax)
+ plt.plot(x_axis, np.mean(y_gpc_samples, axis=0), "r", linewidth=1.5)
+ plt.grid()
+
+ if x_label is not None:
+ plt.xlabel(x_label, fontsize=14)
+
+ if y_label is not None:
+ plt.ylabel(y_label, fontsize=14)
+
+ plt.tight_layout()
+
+ if fn_plot is not None:
+ plt.savefig(os.path.splitext(fn_plot)[0] + "_pdf_qoi.png", dpi=1200)
+ plt.savefig(os.path.splitext(fn_plot)[0] + "_pdf_qoi.pdf")
+ plt.close()
+
+ else:
+ for _i, i in enumerate(output_idx):
+ fig = plt.figure(figsize=(12, 5))
+
+ # One random variable
+ if len(random_vars) == 1:
+ ax1 = fig.add_subplot(1, 2, 1)
+ ax1.plot(coords_gpc, y_gpc[:, _i])
+ if y_orig is not None:
+ ax1.scatter(coords[:, idx_global[0]], y_orig[:, _i], s=7 * np.ones(len(y_orig[:, _i])),
+ facecolor='w', edgecolors='k')
+ legend = [r"gPC", r"original"]
+ else:
+ legend = [r"gPC"]
+ ax1.legend(legend, fontsize=fs)
+ ax1.set_xlabel(r"%s" % random_vars[0], fontsize=fs)
+ ax1.set_ylabel(r"y(%s)" % random_vars[0], fontsize=fs)
+ ax1.grid()
+
+ # Two random variables
+ elif len(random_vars) == 2:
+ ax1 = fig.add_subplot(1, 2, 1, projection='3d')
+ im1 = ax1.plot_surface(x1_2d, x2_2d, np.reshape(y_gpc[:, _i], (x[1].size, x[0].size), order='f'),
+ cmap="jet", alpha=0.75, linewidth=0, edgecolors=None)
+ if y_orig is not None:
+ ax1.scatter(coords[:, idx_global[0]], coords[:, idx_global[1]], y_orig[:, _i],
+ 'k', alpha=1, edgecolors='k', depthshade=False)
+ ax1.set_title(r'gPC approximation', fontsize=fs)
+ ax1.set_xlabel(r"%s" % random_vars[0], fontsize=fs)
+ ax1.set_ylabel(r"%s" % random_vars[1], fontsize=fs)
+
+ if camera_pos is not None:
+ ax1.view_init(elev=camera_pos[0], azim=camera_pos[1])
+
+ fig.colorbar(im1, ax=ax1, orientation='vertical')
+
+ if zlim is not None:
+ ax1.set_zlim(zlim)
+
+ # plot histogram of output data and gPC estimated pdf
+ ax2 = fig.add_subplot(1, 2, 2)
+ if y_orig is not None:
+ ax2.hist(y_orig[:, _i], density=True, bins=20, edgecolor='k')
+ ax2.plot(pdf_x[:, _i], pdf_y[:, _i], 'r')
+ ax2.grid()
+ ax2.set_title("Probability density", fontsize=fs)
+ ax2.set_xlabel(r'$y$', fontsize=16)
+ ax2.set_ylabel(r'$p(y)$', fontsize=16)
plt.tight_layout()
- plt.show()
\ No newline at end of file
+
+ if fn_plot is not None:
+ plt.savefig(os.path.splitext(fn_plot)[0] + "_qoi_" + str(i) + '.png', dpi=1200)
+ plt.savefig(os.path.splitext(fn_plot)[0] + "_qoi_" + str(i) + '.pdf')
+ plt.close()
diff --git a/pygpc/testfunctions/testfunctions.py b/pygpc/testfunctions/testfunctions.py index
b4307481..aa5d594e 100755 --- a/pygpc/testfunctions/testfunctions.py +++ b/pygpc/testfunctions/testfunctions.py @@ -1648,6 +1648,93 @@ def simulate(self, process_id=None, matlab_engine=None): return y_out, additional_data +class Peaks_NaN(AbstractModel): + """ + Three-dimensional peaks function returning NaN values for certain parameters (for testing). + + .. math:: + y = 3(1-x_1)^2 e^{-(x_1^2)-(x_3+1)^2}-10(\\frac{x_1}{5}-x_1^3-x_3^5) e^{-x_1^2-x_3^2}- + \\frac{1}{3} e^{-(x_1+1)^2 - x_3^2} + x_2 + + Parameters + ---------- + p["x1"]: float or ndarray of float [n_grid] + Parameter 1 + p["x2"]: float or ndarray of float [n_grid] + Parameter 2 + p["x3"]: float or ndarray of float [n_grid] + Parameter 3 + + Returns + ------- + y: ndarray of float [n_grid x n_out] + Output data + misc: dict or list of dict [n_grid] + Additional data, will be saved under its keys in the .hdf5 file during gPC simulations for every grid point + + Notes + ----- + .. plot:: + + import numpy as np + from pygpc.testfunctions import plot_testfunction as plot + from collections import OrderedDict + + parameters = OrderedDict() + parameters["x1"] = np.linspace(0, 1, 100) + parameters["x2"] = np.linspace(0, 1, 100) + + constants = OrderedDict() + constants["x3"] = 0. + plot("Peaks", parameters, constants, plot_3d=False) + """ + + def __init__(self, matlab_model=False): + super(type(self), self).__init__(matlab_model=matlab_model) + self.fname = inspect.getfile(inspect.currentframe()) + + def validate(self): + pass + + def simulate(self, process_id=None, matlab_engine=None): + + if type(self.p["x1"]) is np.ndarray: + self.p["x1"] = self.p["x1"].flatten() + if type(self.p["x2"]) is np.ndarray: + self.p["x2"] = self.p["x2"].flatten() + if type(self.p["x3"]) is np.ndarray: + self.p["x3"] = self.p["x3"].flatten() + + y = (3.0 * (1 - self.p["x1"]) ** 2. * np.exp(-(self.p["x1"] ** 2) - (self.p["x3"] + 1) ** 2) + - 10.0 * (self.p["x1"] / 5.0 - self.p["x1"] ** 3 - self.p["x3"] ** 5) + * np.exp(-self.p["x1"] ** 2 - self.p["x3"] ** 2) - 1.0 / 3 + * np.exp(-(self.p["x1"] + 1) ** 2 - self.p["x3"] ** 2)) + self.p["x2"] + + # add some NaN values for testing + mask_nan = self.p["x1"] < 1.5 + + y[mask_nan] = np.NaN + + additional_data = {"additional_data/list_mult_int": [1, 2, 3], + "additional_data/list_single_float": [0.2], + "additional_data/list_single_str": ["test"], + "additional_data/list_mult_str": ["test1", "test2"], + "additional_data/single_float": 0.2, + "additional_data/single_int": 2, + "additional_data/single_str": "test"} + + # # two output variables for testing + # if y.size > 1: + # y_out = np.array([y, 2 * y]).transpose() + # additional_data = y.size * [additional_data] + # else: + # y_out = np.array([y, 2 * y]) + if y.ndim == 1: + y_out = y[:, np.newaxis] + + return y_out, additional_data + + class DiscontinuousRidgeManufactureDecayGenzDiscontinuous(AbstractModel): """ N-dimensional discontinuous test function. The first QOI corresponds to the @@ -1927,6 +2014,89 @@ def deq(rho, t, alpha, beta, gamma): return y_out +class SurfaceCoverageSpecies_NaN(AbstractModel): + """ + Differential equation describing the time-evolution of the surface coverage rho [0, 1] for a given species [1]. + This problem has one or two fixed points according to the value of the recombination rate beta and it exhibits + smooth dependence on the other parameters. The statistics of the solution at t=1 are investigated considering + uncertainties in the initial coverage rho_0 and in the reaction parameter beta. 
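The surface-coverage ODE documented below can also be integrated directly; a standalone sketch using the documented desorption rate gamma = 0.01 (the rho_0, alpha, and beta values are illustrative):

import numpy as np
from scipy.integrate import odeint

def deq(rho, t, alpha, beta, gamma):
    # time evolution of the surface coverage rho
    return alpha * (1. - rho) - gamma * rho - beta * (rho - 1) ** 2 * rho

t = np.arange(0., 1., 0.01)                      # integrate up to t_end = 1
rho = odeint(deq, 0.3, t, args=(1., 10., 0.01))  # rho_0 = 0.3, alpha = 1, beta = 10
print(rho[-1])                                   # rho(t -> 1), the quantity of interest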
Additionally uncertainty + in the surface absorption rate alpha can be considered to make the problem 3-dimensional. + Gamma=0.01 denotes the desorption rate. + + .. math:: \\frac{d\\rho}{dt} = \\alpha (1 - \\rho) - \\gamma \\rho - \\beta (\\rho - 1)^2 \\rho + + Parameters + ---------- + p["rho_0"]: ndarray of float [1] + Initial value rho(t=0) (uniform distributed [0, 1]) + p["beta"]: ndarray of float [1] + Recombination rate (uniform distributed [0, 20]) + p["alpha"]: ndarray of float [1] + Surface absorption rate (1 or uniform distributed [0.1, 2]) + + Returns + ------- + y: ndarray of float [1 x 1] + rho(t->1) + + Notes + ----- + .. plot:: + + import numpy as np + from pygpc.testfunctions import plot_testfunction as plot + from collections import OrderedDict + + parameters = OrderedDict() + parameters["rho_0"] = np.linspace(0, 1, 100) + parameters["beta"] = np.linspace(0, 20, 100) + + constants = OrderedDict() + constants["alpha"] = 1. + + plot("SurfaceCoverageSpecies", parameters, constants, plot_3d=False) + + .. [1] Le Maitre, O.P., Najm, H.N., Ghanem, R.G., Knio, O.M., (2004). + Multi-resolution analysis of Wiener-type uncertainty propagation schemes. + Journal of Computational Physics, 197, 502-531. + """ + + def __init__(self, matlab_model=False): + super(type(self), self).__init__(matlab_model=matlab_model) + self.fname = inspect.getfile(inspect.currentframe()) + + def validate(self): + pass + + def simulate(self, process_id=None, matlab_engine=None): + # System of 1st order DEQ + def deq(rho, t, alpha, beta, gamma): + return alpha * (1. - rho) - gamma * rho - beta * (rho - 1) ** 2 * rho + + # Constants + gamma = 0.01 + + # Simulation parameters + dt = 0.01 + t_end = 1. + t = np.arange(0, t_end, dt) + + # Solve + y_out = np.zeros((len(self.p["rho_0"]), 1)) + + for i in range(len(y_out)): + y = odeint(deq, self.p["rho_0"].flatten()[i], t, + args=(self.p["alpha"].flatten()[i], self.p["beta"].flatten()[i], gamma,)) + y_out[i, 0] = np.array([y[-1]]) + + # add some NaN values for testing + mask_nan = self.p["beta"] > 18.5 + + y_out[mask_nan, 0] = np.NaN + + return y_out + + class Franke(AbstractModel): """ Franke function [1] with 2 parameters. It is often used in regression or interpolation analysis. @@ -2414,6 +2584,79 @@ def simulate(self, process_id=None, matlab_engine=None): return y_out +class GenzOscillatory_NaN(AbstractModel): + """ + N-dimensional "Oscillatory" Genz function [1]. It is defined in the interval [0, 1] x ... x [0, 1]. + + .. math:: y = \\cos \\left( 2 \\pi u_1 + \\sum_{i=1}^{N}a_i x_i \\right) + + Parameters + ---------- + p["x1"]: float or ndarray of float [n_grid] + First parameter defined in [0, 1] + p["xi"]: float or ndarray of float [n_grid] + i-th parameter defined in [0, 1] + p["xN"]: float or ndarray of float [n_grid] + Nth parameter defined in [0, 1] + + Returns + ------- + y: ndarray of float [n_grid x 1] + Output + + Notes + ----- + .. plot:: + + import numpy as np + from pygpc.testfunctions import plot_testfunction as plot + from collections import OrderedDict + + parameters = OrderedDict() + parameters["x1"] = np.linspace(0, 1, 100) + parameters["x2"] = np.linspace(0, 1, 100) + + plot("GenzOscillatory", parameters) + + .. [1] Genz, A. (1984), Testing multidimensional integration routines. + Proc. of international conference on Tools, methods and languages for scientific + and engineering computation, Elsevier North-Holland, Inc., NewYork, NY, USA, pp. 81-94. + + .. 
[2] https://www.sfu.ca/~ssurjano/oscil.html + """ + + def __init__(self, matlab_model=False): + super(type(self), self).__init__(matlab_model=matlab_model) + self.fname = inspect.getfile(inspect.currentframe()) + + def validate(self): + pass + + def simulate(self, process_id=None, matlab_engine=None): + n = len(self.p.keys()) + + # set constants + u = 0.5 * np.ones(n) + a = 5 * np.ones(n) + + # determine sum + s = np.zeros(np.array(self.p[list(self.p.keys())[0]]).size) + + for i, key in enumerate(self.p.keys()): + s += a[i] * self.p[key] + + # determine output + y = np.cos(2 * np.pi * u[0] + s) + + y_out = y[:, np.newaxis] + + # insert some NaN values for testing + mask = self.p[list(self.p.keys())[0]] > 0.8 + y_out[mask, 0] = np.NaN + + return y_out + + class GenzProductPeak(AbstractModel): """ N-dimensional "ProductPeak" Genz function [1]. It is defined in the interval [0, 1] x ... x [0, 1]. @@ -2714,6 +2957,112 @@ def simulate(self, process_id=None, matlab_engine=None): return y_out +class Ishigami_NaN(AbstractModel): + """ + Three-dimensional test function of Ishigami. + + The Ishigami function of Ishigami & Homma (1990) [1] is used as an example + for uncertainty and sensitivity analysis methods, because it exhibits + strong nonlinearity and nonmonotonicity. It also has a peculiar + dependence on x3, as described by Sobol' & Levitan (1999) [2]. + The values of a and b used by Crestaux et al. (2007) [3] and Marrel et al. (2009) [4] are: a = 7 and b = 0.1. + + .. math:: y = \sin(x_1) + a \sin(x_2)^2 + b x_3^4 \sin(x_1) + + Parameters + ---------- + p["x1"]: float or ndarray of float [n_grid] + First parameter defined in [-pi, pi] + p["x2"]: float or ndarray of float [n_grid] + Second parameter defined in [-pi, pi] + p["x3"]: float or ndarray of float [n_grid] + Third parameter defined in [-pi, pi] + p["a"]: float + shape parameter (a=7) + p["b"]: float + shape parameter (b=0.1) + + Returns + ------- + y: ndarray of float [n_grid x 1] + Output data + + Notes + ----- + .. plot:: + + import numpy as np + from pygpc.testfunctions import plot_testfunction as plot + from collections import OrderedDict + + parameters = OrderedDict() + parameters["x1"] = np.linspace(-np.pi, np.pi, 100) + parameters["x2"] = np.linspace(-np.pi, np.pi, 100) + + constants = OrderedDict() + constants["a"] = 7. + constants["b"] = 0.1 + constants["x3"] = 0. + + plot("Ishigami", parameters, constants, plot_3d=False) + + .. [1] Ishigami, T., Homma, T. (1990, December). An importance quantification + technique in uncertainty analysis for computer models. In Uncertainty + Modeling and Analysis, 1990. Proceedings., First International Symposium + on (pp. 398-403). IEEE. + + .. [2] Sobol', I.M., Levitan, Y.L. (1999). On the use of variance reducing + multipliers in Monte Carlo computations of a global sensitivity index. + Computer Physics Communications, 117(1), 52-61. + + .. [3] Crestaux, T., Martinez, J.-M., Le Maitre, O., & Lafitte, O. (2007). + Polynomial chaos expansion for uncertainties quantification and sensitivity analysis [PowerPoint slides]. + Retrieved from SAMO 2007 website: http://samo2007.chem.elte.hu/lectures/Crestaux.pdf. + + .. [4] Marrel, A., Iooss, B., Laurent, B., & Roustant, O. (2009). + Calculations of sobol indices for the gaussian process metamodel. + Reliability Engineering & System Safety, 94(3), 742-751. 
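A quick numerical sanity check of the Ishigami formula above with the customary constants a = 7 and b = 0.1 (the sample size is arbitrary):

import numpy as np

a, b = 7., 0.1
x1, x2, x3 = np.random.uniform(-np.pi, np.pi, size=(3, 100000))
y = np.sin(x1) + a * np.sin(x2) ** 2 + b * x3 ** 4 * np.sin(x1)
print(y.mean())  # converges to a/2 = 3.5, since the x1 terms have zero mean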
+ """ + + def __init__(self, matlab_model=False): + super(type(self), self).__init__(matlab_model=matlab_model) + self.fname = inspect.getfile(inspect.currentframe()) + + def validate(self): + pass + + def simulate(self, process_id=None, matlab_engine=None): + + if self.p["x1"] is not np.ndarray: + self.p["x1"] = np.array(self.p["x1"]) + + if self.p["x2"] is not np.ndarray: + self.p["x2"] = np.array(self.p["x2"]) + + if self.p["x3"] is not np.ndarray: + self.p["x3"] = np.array(self.p["x3"]) + + if self.p["a"] is not np.ndarray: + self.p["a"] = np.array(self.p["a"]) + + if self.p["b"] is not np.ndarray: + self.p["b"] = np.array(self.p["b"]) + + y = (np.sin(self.p["x1"].flatten()) + self.p["a"].flatten() * np.sin(self.p["x2"].flatten()) ** 2 + + self.p["b"].flatten() * self.p["x3"].flatten() ** 4 * np.sin(self.p["x1"].flatten())) + + if type(y) is not np.ndarray: + y = np.array([y]) + + y_out = y[:, np.newaxis] + + # insert some NaN values for testing + mask = (self.p["x1"] > 2).flatten() + y_out[mask, 0] = np.NaN + + return y_out + + class GFunction(AbstractModel): """ N-dimensional g-function used by Saltelli and Sobol (1995) [1]. @@ -3071,6 +3420,92 @@ def simulate(self, process_id=None, matlab_engine=None): return y_out +class DiscontinuousRidgeManufactureDecay_NaN(AbstractModel): + """ + N-dimensional testfunction containing a linear discontinuity. + On the one side the output corresponds to the Ridge function + and on the other side it correspond to the ManufactureDecay testfunction. + + .. math:: + y = \\begin{cases} + \\text{ManufactureDecay}(x), & \\text{if } \\sum_{i=1}^{N}x_i \\leq 1 \\\\ + \\text{Ridge}(x), & \\text{otherwise} + \\end{cases} + + Parameters + ---------- + p["x1"]: float or ndarray of float [n_grid] + First parameter [0, 1] + p["xi"]: float or ndarray of float [n_grid] + i-th parameter defined in [0, 1] + p["xN"]: float or ndarray of float [n_grid] + Nth parameter [0, 1] + + Returns + ------- + y: ndarray of float [n_grid x 1] + Output data + + Notes + ----- + .. 
plot:: + + import numpy as np + from pygpc.testfunctions import plot_testfunction as plot + from collections import OrderedDict + + parameters = OrderedDict() + parameters["x1"] = np.linspace(0, 1, 250) + parameters["x2"] = np.linspace(0, 1, 250) + + plot("DiscontinuousRidgeManufactureDecay", parameters) + """ + + def __init__(self, matlab_model=False): + super(type(self), self).__init__(matlab_model=matlab_model) + self.fname = inspect.getfile(inspect.currentframe()) + + def validate(self): + pass + + def simulate(self, process_id=None, matlab_engine=None): + + for key in self.p.keys(): + if self.p[key].ndim == 1: + self.p[key] = self.p[key][:, np.newaxis] + + x = np.hstack([self.p[key] for key in self.p.keys()]) + + y = np.zeros(x.shape[0]) + mask = (np.sum(x, axis=1) <= 1.).flatten() + # mask = np.logical_and(mask, np.linalg.norm(x-0.85, axis=1) > .8) + + p_1 = OrderedDict() + p_2 = OrderedDict() + + for i, key in enumerate(self.p.keys()): + p_1[key] = x[mask, i] + p_2[key] = x[np.logical_not(mask), i] + + model_1 = ManufactureDecay().set_parameters(p_1) + model_2 = Ridge().set_parameters(p_2) + + y_1 = model_1.simulate() + y_2 = model_2.simulate() + + y[mask] = y_1.flatten() + y[np.logical_not(mask)] = y_2.flatten() + + y_out = y[:, np.newaxis] + y_out = np.hstack((y_out, y_out)) + + # insert some NaN values for testing + mask = (self.p[list(self.p.keys())[0]] > 0.8).flatten() + y_out[mask, 0] = np.NaN + + return y_out + + class OakleyOhagan2004(AbstractModel): # Todo: @Lucas: remove text files """ diff --git a/pygpc/validation.py b/pygpc/validation.py index c9cde5ee..0c2064f0 100755 --- a/pygpc/validation.py +++ b/pygpc/validation.py @@ -415,310 +415,3 @@ def validate_gpc_plot(session, coeffs, random_vars, n_grid=None, coords=None, ou if fn_out is not None: plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx[i]) + '.png', dpi=1200) plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx[i]) + '.pdf') - - -def plot_gpc(session, coeffs, random_vars, coords, results, n_grid=None, output_idx=0, fn_out=None, camera_pos=None, - zlim=None): - """ - Compares gPC approximation with original model function. Evaluates both at n_grid (x n_grid) sampling points and - calculate the difference between two solutions at the output quantity with output_idx and saves the plot as - *_QOI_idx_.png/pdf. Also generates one .hdf5 results file with the evaluation results. - - Parameters - ---------- - session : GPC Session object instance - GPC session object containing all information i.e., gPC, Problem, Model, Grid, Basis, RandomParameter instances - coeffs : ndarray of float [n_coeffs x n_out] or list of ndarray of float [n_qoi][n_coeffs x n_out] - GPC coefficients - random_vars: str or list of str [2] - Names of the random variables, the analysis is performed for one or max. two random variables - n_grid : int or list of int [2], optional - Number of samples in each dimension to compare the gPC approximation with the original model function. 
- A cartesian grid is generated based on the limits of the specified random_vars - coords : ndarray of float [n_coords x n_dim] - Parameter combinations for the random_vars the comparison is conducted with - output_idx : int, optional, default=0 - Indices of output quantity to consider - results: ndarray of float [n_coords x n_out] - If available, data of original model function at grid, containing all QOIs - fn_out : str, optional, default: None - Filename of plot comparing original vs gPC model (*.png or *.pdf) - camera_pos : list [2], optional, default: None - Camera position of 3D surface plot (for 2 random variables only) [azimuth, elevation] - zlim : list of float, optional, default: None - Limits of 3D plot in z direction - - Returns - ------- - : .hdf5 file - Data file containing the grid points and the results of the original and the gpc approximation - : .png and .pdf file - Plot comparing original vs gPC model - """ - - if type(output_idx) is int: - output_idx = [output_idx] - - if type(random_vars) is not list: - random_vars = random_vars.tolist() - - assert len(random_vars) <= 2 - - if n_grid and type(n_grid) is not list: - n_grid = n_grid.tolist() - - # Create grid such that it includes the mean values of other random variables - grid = np.zeros((np.prod(n_grid), len(session.parameters_random))) - - - idx = [] - idx_global = [] - - # sort random_vars according to gpc.parameters - for i_p, p in enumerate(session.parameters_random.keys()): - if p not in random_vars: - grid[:, i_p] = session.parameters_random[p].mean - - else: - idx.append(random_vars.index(p)) - idx_global.append(i_p) - - random_vars = [random_vars[i] for i in idx] - x = [] - - for i_p, p in enumerate(random_vars): - x.append(np.linspace(session.parameters_random[p].pdf_limits[0], - session.parameters_random[p].pdf_limits[1], - n_grid[i_p])) - - coords_gpc = get_cartesian_product(x) - if len(random_vars) == 2: - x1_2d, x2_2d = np.meshgrid(x[0], x[1]) - - grid[:, idx_global] = coords_gpc - - # Normalize grid - grid_norm = Grid(parameters_random=session.parameters_random).get_normalized_coordinates(grid) - - # Evaluate gPC expansion on grid and estimate output pdf - if session.qoi_specific: - y_gpc = np.zeros((grid_norm.shape[0], len(output_idx))) - pdf_x = np.zeros((100, len(output_idx))) - pdf_y = np.zeros((100, len(output_idx))) - - for i, o_idx in enumerate(output_idx): - y_gpc[:, i] = session.gpc[o_idx].get_approximation(coeffs=coeffs[o_idx], x=grid_norm, - output_idx=0).flatten() - - pdf_x_tmp, pdf_y_tmp = session.gpc[o_idx].get_pdf(coeffs=coeffs[o_idx], n_samples=1e5, output_idx=0) - pdf_x[:, i] = pdf_x_tmp.flatten() - pdf_y[:, i] = pdf_y_tmp.flatten() - else: - y_gpc = session.gpc[0].get_approximation(coeffs=coeffs, x=grid_norm, output_idx=output_idx) - pdf_x, pdf_y = session.gpc[0].get_pdf(coeffs=coeffs, n_samples=1e5, output_idx=output_idx) - - y_orig = results[:, output_idx] - - # add axes if necessary - if y_gpc.ndim == 1: - y_gpc = y_gpc[:, np.newaxis] - - if y_orig.ndim == 1: - y_orig = y_orig[:, np.newaxis] - - # Plot results - matplotlib.rc('text', usetex=False) - matplotlib.rc('xtick', labelsize=13) - matplotlib.rc('ytick', labelsize=13) - fs = 14 - - for i in output_idx: - fig = plt.figure(figsize=(9.75, 5)) - - # One random variable - if len(random_vars) == 1: - ax1 = fig.add_subplot(1, 2, 1) - ax1.plot(coords_gpc, y_gpc[:, i]) - ax1.scatter(coords[:, idx_global[0]], y_orig[:, i], s=7*np.ones(len(y_orig[:, i])), facecolor='w', edgecolors='k') - ax1.legend([r"gPC", r"original",], fontsize=fs) - 
ax1.set_xlabel(r"%s" % random_vars[0], fontsize=fs) - ax1.set_ylabel(r"y(%s)" % random_vars[0], fontsize=fs) - ax1.grid() - - # Two random variables - elif len(random_vars) == 2: - ax1 = fig.add_subplot(1, 2, 1, projection='3d') - im1 = ax1.plot_surface(x1_2d, x2_2d, np.reshape(y_gpc[:, i], (x[1].size, x[0].size), order='f'), - cmap="jet", alpha=0.75, linewidth=0, edgecolors=None) - ax1.scatter(coords[:, idx_global[0]], coords[:, idx_global[1]], results, - 'k', alpha=1, edgecolors='k', depthshade=False) - ax1.set_title(r'gPC approximation', fontsize=fs) - ax1.set_xlabel(r"%s" % random_vars[0], fontsize=fs) - ax1.set_ylabel(r"%s" % random_vars[1], fontsize=fs) - - if camera_pos is not None: - ax1.view_init(elev=camera_pos[0], azim=camera_pos[1]) - - fig.colorbar(im1, ax=ax1, orientation='vertical') - - if zlim is not None: - ax1.set_zlim(zlim) - - # plot histogram of output data and gPC estimated pdf - ax2 = fig.add_subplot(1, 2, 2) - ax2.hist(results, density=True, bins=20, edgecolor='k') - ax2.plot(pdf_x, pdf_y, 'r') - ax2.grid() - ax2.set_title("Probability density", fontsize=fs) - ax2.set_xlabel(r'$y$', fontsize=16) - ax2.set_ylabel(r'$p(y)$', fontsize=16) - plt.tight_layout() - - if fn_out is not None: - plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx[i]) + '.png', dpi=1200) - plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx[i]) + '.pdf') - plt.close() - - - - -# def plot_gpc(gpc, coeffs, random_vars, n_grid=None, coords=None, output_idx=0, fn_out=None): -# """ -# Compares gPC approximation with original model function. Evaluates both at n_grid (x n_grid) sampling points and -# calculate the difference between two solutions at the output quantity with output_idx and saves the plot as -# *_QOI_idx_.png/pdf. Also generates one .hdf5 results file with the evaluation results. -# -# Parameters -# ---------- -# gpc : GPC object instance -# GPC object containing all information i.e., Problem, Model, Grid, Basis, RandomParameter instances -# coeffs : ndarray of float [n_coeffs x n_out] -# GPC coefficients -# random_vars: str or list of str [2] -# Names of the random variables, the analysis is performed for one or max. two random variables -# n_grid : int or list of int [2], optional -# Number of samples in each dimension to compare the gPC approximation with the original model function. 
-# A cartesian grid is generated based on the limits of the specified random_vars -# coords : ndarray of float [n_coords x n_dim] -# Parameter combinations for the random_vars the comparison is conducted with -# output_idx : int, optional, default=0 -# Indices of output quantity to consider -# fn_out : str -# Filename of plot comparing original vs gPC model (*_QOI_idx_.png is added) -# -# Returns -# ------- -# : .hdf5 file -# Data file containing the grid points and the results of the original and the gpc approximation -# : .png and .pdf file -# Plot comparing original vs gPC model -# """ -# output_idx_gpc = output_idx -# -# if type(gpc) is list: -# gpc = gpc[output_idx] -# coeffs = coeffs[output_idx] -# output_idx_gpc = 0 -# -# if isinstance(gpc, MEGPC): -# problem = gpc.problem -# else: -# if gpc.p_matrix is not None: -# problem = gpc.problem_original -# else: -# problem = gpc.problem -# -# if type(random_vars) is not list: -# random_vars = random_vars.tolist() -# -# if n_grid and type(n_grid) is not list: -# n_grid = n_grid.tolist() -# -# # Create grid such that it includes the mean values of other random variables -# if coords is None: -# grid = np.zeros((np.prod(n_grid), len(problem.parameters_random))) -# else: -# grid = np.zeros((coords.shape[0], len(problem.parameters_random))) -# -# idx = [] -# -# # sort random_vars according to gpc.parameters -# for i_p, p in enumerate(problem.parameters_random.keys()): -# if p not in random_vars: -# grid[:, i_p] = problem.parameters_random[p].mean -# -# else: -# idx.append(random_vars.index(p)) -# -# random_vars = [random_vars[i] for i in idx] -# x = [] -# -# if coords is None: -# n_grid = [n_grid[i] for i in idx] -# -# for i_p, p in enumerate(random_vars): -# x.append(np.linspace(problem.parameters_random[p].pdf_limits[0], -# problem.parameters_random[p].pdf_limits[1], -# n_grid[i_p])) -# -# coords = get_cartesian_product(x) -# -# else: -# for i_p, p in enumerate(random_vars): -# x.append(np.unique(coords[:, i_p])) -# -# grid[:, (grid == 0).all(axis=0)] = coords -# -# # Normalize grid -# grid_norm = Grid(parameters_random=problem.parameters_random).get_normalized_coordinates(grid) -# -# # Evaluate gPC expansion on grid -# y_gpc = gpc.get_approximation(coeffs=coeffs, x=grid_norm, output_idx=output_idx_gpc) -# -# # add axes if necessary -# if y_gpc.ndim == 1: -# y_gpc = y_gpc[:, np.newaxis] -# -# # Plot results -# matplotlib.rc('text', usetex=False) -# matplotlib.rc('xtick', labelsize=13) -# matplotlib.rc('ytick', labelsize=13) -# fs = 14 -# -# # One random variable -# if len(random_vars) == 1: -# fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, squeeze=True, figsize=(12, 5)) -# -# ax1.plot(coords, y_gpc) -# ax1.legend([r"gPC"], fontsize=fs) -# ax1.set_xlabel(r"$%s$" % random_vars[0], fontsize=fs) -# ax1.set_ylabel(r"$y(%s)$" % random_vars[0], fontsize=fs) -# ax1.grid() -# -# # Two random variables -# elif len(random_vars) == 2: -# fig, (ax1) = matplotlib.pyplot.subplots(nrows=1, ncols=1, -# sharex='all', sharey='all', -# squeeze=True, figsize=(4, 5)) -# -# x1_2d, x2_2d = np.meshgrid(x[0], x[1]) -# -# min_all = np.min(y_gpc) -# max_all = np.max(y_gpc) -# -# # gPC approximation -# im1 = ax1.pcolor(x1_2d, x2_2d, np.reshape(y_gpc, (x[1].size, x[0].size), order='f'), -# cmap="jet", -# vmin=min_all, -# vmax=max_all) -# ax1.set_title(r'gPC approximation', fontsize=fs) -# ax1.set_xlabel(r"$%s$" % random_vars[0], fontsize=fs) -# ax1.set_ylabel(r"$%s$" % random_vars[1], fontsize=fs) -# -# fig.colorbar(im1, ax=ax1, orientation='vertical') -# -# 
plt.tight_layout()
-#
-# if fn_out:
-# plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx) + '.png', dpi=600)
-# plt.savefig(os.path.splitext(fn_out)[0] + "_qoi_" + str(output_idx) + '.pdf')
diff --git a/setup.py b/setup.py
index cebb6e6c..8cc5f81e 100755
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,7 @@
setup(name='pygpc',
- version='0.3.3',
+ version='0.3.4',
description='A sensitivity and uncertainty analysis toolbox for Python',
author='Konstantin Weise',
author_email='kweise@cbs.mpg.de',
@@ -65,3 +65,19 @@
"Source Code": "https://github.com/pygpc-polynomial-chaos/pygpc"},
zip_safe=False,
include_package_data=True)
+
+
+"""
+Notes for Mac M1 users:
+
+1. You'll need to install llvm and libomp from homebrew (be sure to use the arm versions).
+
+2. You'll need to prepend the llvm path (by default /opt/homebrew/opt/llvm/bin) to PATH so the system uses the clang compilers from llvm (tested with llvm 16).
+
+3. You'll need to set up the LDFLAGS and CPPFLAGS variables to include BOTH llvm and libomp:
+LDFLAGS = "-L/opt/homebrew/opt/llvm/lib -L/opt/homebrew/opt/libomp/lib"
+CPPFLAGS = "-I/opt/homebrew/opt/llvm/include -I/opt/homebrew/opt/libomp/include"
+(these are the default locations; you can query them with brew info llvm and brew info libomp)
+
+4. Finally, you just need to change openmp_link_args inside setup.py for pyGPC to "-lomp" instead of "-lgomp"
+"""
\ No newline at end of file
diff --git a/tests/test_algorithms.py b/tests/test_algorithms.py
index aa1adc70..3fdb77c3 100755
--- a/tests/test_algorithms.py
+++ b/tests/test_algorithms.py
@@ -79,7 +79,7 @@ def test_algorithms_000_Static_gpc_quad(self):
Grid: TensorGrid
"""
global folder, plot, save_session_format
- test_name = 'pygpc_test_000_Static_gpc_quad'
+ test_name = 'test_algorithms_000_Static_gpc_quad'
print(test_name)
# define model
@@ -175,7 +175,7 @@ def test_algorithms_001_Static_gpc(self):
Grid: Random
"""
global folder, plot, save_session_format
- test_name = 'pygpc_test_001_Static_gpc'
+ test_name = 'test_algorithms_001_Static_gpc'
print(test_name)
# define model
@@ -264,6 +264,64 @@ def test_algorithms_001_Static_gpc(self):
print("done!\n")
+ def test_algorithms_001_Static_gpc_NaN(self):
+ """
+ Algorithm: Static
+ Method: Regression
+ Solver: Moore-Penrose
+ Grid: Random
+ """
+ global folder, plot, save_session_format
+ test_name = 'test_algorithms_001_Static_gpc_NaN'
+ print(test_name)
+
+ # define model
+ model = pygpc.testfunctions.Peaks_NaN()
+
+ # define problem
+ parameters = OrderedDict()
+ parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[1.2, 2])
+ parameters["x2"] = 1.25
+ parameters["x3"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 0.6])
+ problem = pygpc.Problem(model, parameters)
+
+ # gPC options
+ options = dict()
+ options["method"] = "reg"
+ options["solver"] = "Moore-Penrose"
+ options["settings"] = None
+ options["order"] = [9, 9]
+ options["order_max"] = 9
+ options["interaction_order"] = 2
+ options["matrix_ratio"] = 0.7
+ options["error_type"] = "loocv"
+ options["n_cpu"] = 0
+ options["fn_results"] = os.path.join(folder, test_name)
+ options["save_session_format"] = save_session_format
+ options["gradient_enhanced"] = True
+ options["gradient_calculation"] = "FD_1st2nd"
+ options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+ options["backend"] = "omp"
+ options["grid"] = pygpc.Random
+ options["grid_options"] = {"seed": seed}
+ options["adaptive_sampling"] = True
+
+ # define algorithm
+ algorithm = pygpc.Static(problem=problem, options=options)
+
+ # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC algorithm
+        session, coeffs, results = session.run()
+
+        # evaluate gPC approximation error
+        nrmsd = session.gpc[0].error[-1]*100
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
+
+        print("done!\n")
+
     def test_algorithms_002_MEStatic_gpc(self):
         """
         Algorithm: MEStatic
         Method: Regression
         Solver: Moore-Penrose
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_002_MEStatic_gpc'
+        test_name = 'test_algorithms_002_MEStatic_gpc'
         print(test_name)
 
         # define model
@@ -367,6 +425,76 @@ def test_algorithms_002_MEStatic_gpc(self):
 
         print("done!\n")
 
+    def test_algorithms_002_MEStatic_gpc_NaN(self):
+        """
+        Algorithm: MEStatic
+        Method: Regression
+        Solver: LarsLasso
+        Grid: Random
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_002_MEStatic_gpc_NaN'
+        print(test_name)
+
+        # define model
+        model = pygpc.testfunctions.SurfaceCoverageSpecies_NaN()
+
+        # define problem
+        parameters = OrderedDict()
+        parameters["rho_0"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 1])
+        parameters["beta"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 20])
+        parameters["alpha"] = 1.
+        problem = pygpc.Problem(model, parameters)
+
+        # gPC options
+        options = dict()
+        options["method"] = "reg"
+        options["solver"] = "LarsLasso"
+        options["settings"] = None
+        options["order"] = [15, 15]
+        options["order_max"] = 12
+        options["interaction_order"] = 2
+        options["n_grid"] = 1000
+        options["matrix_ratio"] = None
+        options["n_cpu"] = 0
+        options["gradient_enhanced"] = True
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["error_type"] = "loocv"
+        options["qoi"] = "all"
+        options["classifier"] = "learning"
+        options["classifier_options"] = {"clusterer": "KMeans",
+                                         "n_clusters": 2,
+                                         "classifier": "MLPClassifier",
+                                         "classifier_solver": "lbfgs"}
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["grid"] = pygpc.Random
+        options["grid_options"] = {"seed": seed}
+
+        # define algorithm
+        algorithm = pygpc.MEStatic(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC algorithm
+        session, coeffs, results = session.run()
+
+        # read session
+        session = pygpc.read_session(fname=session.fn_session, folder=session.fn_session_folder)
+
+        nrmsd = session.gpc[0].error[-1] * 100
+
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 5, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
     def test_algorithms_003_StaticProjection_gpc(self):
         """
         Algorithm: StaticProjection
         Method: Regression
         Solver: Moore-Penrose
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_003_StaticProjection_gpc'
+        test_name = 'test_algorithms_003_StaticProjection_gpc'
         print(test_name)
 
         # define model
@@ -392,17 +520,17 @@ def test_algorithms_003_StaticProjection_gpc(self):
         options["method"] = "reg"
         options["solver"] = "LarsLasso"
         options["settings"] = None
-        options["order"] = [10]
-        options["order_max"] = 10
+        options["order"] = [15, 15]
+        options["order_max"] = 15
         options["interaction_order"] = 1
         options["n_cpu"] = 0
         options["error_type"] = "nrmsd"
         options["n_samples_validation"] = 1e3
         options["eps"] = 1e-3
         options["error_norm"] = "relative"
-        options["matrix_ratio"] = 2
+        options["matrix_ratio"] = None
         options["qoi"] = 0
-        options["n_grid"] = 5
+        options["n_grid"] = 500
         options["fn_results"] = os.path.join(folder, test_name)
         options["save_session_format"] = save_session_format
         options["gradient_enhanced"] = False
@@ -459,7 +587,72 @@ def test_algorithms_003_StaticProjection_gpc(self):
                                      plot=plot)
 
         print("> Maximum NRMSD (gpc vs original): {:.2}%".format(np.max(nrmsd)*100))
-        # self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+        self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
+    def test_algorithms_003_StaticProjection_gpc_NaN(self):
+        """
+        Algorithm: StaticProjection
+        Method: Regression
+        Solver: LarsLasso
+        Grid: LHS_L1
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_003_StaticProjection_gpc_NaN'
+        print(test_name)
+
+        # define model
+        model = pygpc.testfunctions.GenzOscillatory_NaN()
+
+        # define problem
+        parameters = OrderedDict()
+        parameters["x1"] = pygpc.Beta(pdf_shape=[1., 1.], pdf_limits=[0., 1.])
+        parameters["x2"] = pygpc.Beta(pdf_shape=[1., 1.], pdf_limits=[0., 1.])
+        problem = pygpc.Problem(model, parameters)
+
+        # gPC options
+        options = dict()
+        options["method"] = "reg"
+        options["solver"] = "LarsLasso"
+        options["settings"] = None
+        options["order"] = [15, 15]
+        options["order_max"] = 15
+        options["interaction_order"] = 1
+        options["n_cpu"] = 0
+        options["error_type"] = "loocv"
+        options["eps"] = 1e-3
+        options["error_norm"] = "relative"
+        options["matrix_ratio"] = None
+        options["qoi"] = 0
+        options["n_grid"] = 500
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["gradient_enhanced"] = False
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["grid"] = pygpc.LHS_L1
+        options["grid_options"] = {"criterion": ["tmc", "cc"]}
+
+        # define algorithm
+        algorithm = pygpc.StaticProjection(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC algorithm
+        session, coeffs, results = session.run()
+
+        # read session
+        session = pygpc.read_session(fname=session.fn_session, folder=session.fn_session_folder)
+        nrmsd = session.gpc[0].error[-1]*100
+
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
 
         print("> Checking file consistency...")
         files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
@@ -475,7 +668,7 @@ def test_algorithms_004_MEStaticProjection_gpc(self):
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_004_MEStaticProjection_gpc'
+        test_name = 'test_algorithms_004_MEStaticProjection_gpc'
         print(test_name)
 
         # define model
@@ -492,8 +685,8 @@ def test_algorithms_004_MEStaticProjection_gpc(self):
         options["method"] = "reg"
         options["solver"] = "LarsLasso"
         options["settings"] = None
-        options["order"] = [3, 3]
-        options["order_max"] = 3
+        options["order"] = [5, 5]
+        options["order_max"] = 5
         options["interaction_order"] = 2
         options["matrix_ratio"] = 2
         options["n_cpu"] = 0
@@ -570,6 +763,74 @@ def test_algorithms_004_MEStaticProjection_gpc(self):
 
         print("done!\n")
 
+    def test_algorithms_004_MEStaticProjection_gpc_NaN(self):
+        """
+        Algorithm: MEStaticProjection
+        Method: Regression
+        Solver: LarsLasso
+        Grid: Random
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_004_MEStaticProjection_gpc_NaN'
+        print(test_name)
+
+        # define model
+        model = pygpc.testfunctions.DiscontinuousRidgeManufactureDecay_NaN()
+
+        # define problem
+        parameters = OrderedDict()
+        parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 1])
+        parameters["x2"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 1])
+        problem = pygpc.Problem(model, parameters)
+
+        # gPC options
+        options = dict()
+        options["method"] = "reg"
+        options["solver"] = "LarsLasso"
+        options["settings"] = None
+        options["order"] = [5, 5]
+        options["order_max"] = 5
+        options["interaction_order"] = 2
+        options["matrix_ratio"] = 2
+        options["n_cpu"] = 0
+        options["gradient_enhanced"] = False
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["n_grid"] = 2000
+        options["error_type"] = "loocv"
+        options["n_samples_validation"] = 1e3
+        options["qoi"] = "all"
+        options["classifier"] = "learning"
+        options["classifier_options"] = {"clusterer": "KMeans",
+                                         "n_clusters": 2,
+                                         "classifier": "MLPClassifier",
+                                         "classifier_solver": "lbfgs"}
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["grid"] = pygpc.Random
+        options["grid_options"] = {"seed": 1}
+
+        # define algorithm
+        algorithm = pygpc.MEStaticProjection(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC session
+        session, coeffs, results = session.run()
+
+        # evaluate gPC approximation error
+        nrmsd = session.gpc[0].error[-1] * 100
+
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 5, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
     def test_algorithms_005_RegAdaptive_gpc(self):
         """
         Algorithm: RegAdaptive
         Method: Regression
         Solver: Moore-Penrose
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_005_RegAdaptive_gpc'
+        test_name = 'test_algorithms_005_RegAdaptive_gpc'
         print(test_name)
 
         # Model
@@ -666,7 +927,76 @@ def test_algorithms_005_RegAdaptive_gpc(self):
                                      plot=plot)
 
         print("> Maximum NRMSD (gpc vs original): {:.2}%".format(np.max(nrmsd)*100))
-        # self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+        self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
+    def test_algorithms_005_RegAdaptive_gpc_NaN(self):
+        """
+        Algorithm: RegAdaptive
+        Method: Regression
+        Solver: LarsLasso
+        Grid: L1
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_005_RegAdaptive_gpc_NaN'
+        print(test_name)
+
+        # Model
+        model = pygpc.testfunctions.Ishigami_NaN()
+
+        # Problem
+        parameters = OrderedDict()
+        parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-np.pi, np.pi])
+        parameters["x2"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-np.pi, np.pi])
+        parameters["x3"] = 0.
+        parameters["a"] = 7.
+        parameters["b"] = 0.1
+
+        problem = pygpc.Problem(model, parameters)
+
+        # gPC options
+        options = dict()
+        options["order_start"] = 8
+        options["order_end"] = 20
+        options["solver"] = "LarsLasso"
+        options["interaction_order"] = 2
+        options["order_max_norm"] = 1.0
+        options["n_cpu"] = 0
+        options["adaptive_sampling"] = False
+        options["gradient_enhanced"] = True
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["eps"] = 0.0075
+        # options["grid"] = pygpc.LHS
+        # options["grid_options"] = {"criterion": "ese", "seed": seed}
+
+        options["grid"] = pygpc.L1
+        options["grid_options"] = {"criterion": ["mc"],
+                                   "method": "iter",
+                                   "n_iter": 1000,
+                                   "seed": seed}
+
+        # define algorithm
+        algorithm = pygpc.RegAdaptive(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC session
+        session, coeffs, results = session.run()
+
+        # read session
+        session = pygpc.read_session(fname=session.fn_session, folder=session.fn_session_folder)
+        nrmsd = session.gpc[0].error[-1] * 100
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
 
         print("> Checking file consistency...")
         files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
@@ -682,7 +1012,7 @@ def test_algorithms_006_RegAdaptive_anisotropic_gpc(self):
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_006_RegAdaptiveAnisotropic_gpc'
+        test_name = 'test_algorithms_006_RegAdaptive_anisotropic_gpc'
         print(test_name)
 
         # Model
@@ -763,7 +1093,74 @@ def test_algorithms_006_RegAdaptive_anisotropic_gpc(self):
                                      plot=plot)
 
         print("> Maximum NRMSD (gpc vs original): {:.2}%".format(np.max(nrmsd)*100))
-        # self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+        self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
+    def test_algorithms_006_RegAdaptive_anisotropic_gpc_NaN(self):
+        """
+        Algorithm: RegAdaptive
+        Method: Regression
+        Solver: Moore-Penrose
+        Grid: Random
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_006_RegAdaptive_anisotropic_gpc_NaN'
+        print(test_name)
+
+        # Model
+        model = pygpc.testfunctions.Ishigami_NaN()
+
+        # Problem
+        parameters = OrderedDict()
+        parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-np.pi, np.pi])
+        parameters["x2"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-np.pi, np.pi])
parameters["x3"] = 1. # pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[-np.pi, np.pi]) + parameters["a"] = 7. + parameters["b"] = 0.1 + + problem = pygpc.Problem(model, parameters) + + # gPC options + options = dict() + options["order_start"] = 0 + options["order_end"] = 20 + options["solver"] = "Moore-Penrose" + options["interaction_order"] = 2 + options["order_max_norm"] = 1.0 + options["n_cpu"] = 0 + options["adaptive_sampling"] = False + options["gradient_enhanced"] = False + options["gradient_calculation"] = "FD_fwd" + options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2} + options["fn_results"] = os.path.join(folder, test_name) + options["save_session_format"] = save_session_format + options["eps"] = 0.0075 + options["basis_increment_strategy"] = "anisotropic" + options["matrix_ratio"] = 2 + + options["grid"] = pygpc.Random + options["grid_options"] = {"seed": seed} + + # define algorithm + algorithm = pygpc.RegAdaptive(problem=problem, options=options) + + # Initialize gPC Session + session = pygpc.Session(algorithm=algorithm) + + # run gPC session + session, coeffs, results = session.run() + + # read session + session = pygpc.read_session(fname=session.fn_session, folder=session.fn_session_folder) + nrmsd = session.gpc[0].error[-1] * 100 + + print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd)) + self.expect_true(nrmsd < 1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd)) print("> Checking file consistency...") files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5") @@ -779,7 +1176,7 @@ def test_algorithms_007_RegAdaptiveProjection_gpc(self): Grid: Random """ global folder, plot, save_session_format - test_name = 'pygpc_test_006_RegAdaptiveProjection_gpc' + test_name = 'test_algorithms_007_RegAdaptiveProjection_gpc' print(test_name) # define model @@ -811,11 +1208,8 @@ def test_algorithms_007_RegAdaptiveProjection_gpc(self): options["qoi"] = 0 options["error_type"] = "nrmsd" options["eps"] = 1e-3 - options["grid"] = pygpc.L1 - options["grid_options"] = {"method": "greedy", - "criterion": ["mc"], - "n_pool": 1000, - "seed": seed} + options["grid"] = pygpc.Random + options["grid_options"] = {"seed": seed} # define algorithm algorithm = pygpc.RegAdaptiveProjection(problem=problem, options=options) @@ -865,7 +1259,72 @@ def test_algorithms_007_RegAdaptiveProjection_gpc(self): plot=plot) print("> Maximum NRMSD (gpc vs original): {:.2}%".format(np.max(nrmsd)*100)) - # self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100)) + self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100)) + + print("> Checking file consistency...") + files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5") + self.expect_true(files_consistent, error_msg) + + print("done!\n") + + def test_algorithms_007_RegAdaptiveProjection_gpc_NaN(self): + """ + Algorithm: RegAdaptiveProjection + Method: Regression + Solver: Moore-Penrose + Grid: Random + """ + global folder, plot, save_session_format + test_name = 'test_algorithms_007_RegAdaptiveProjection_gpc_NaN' + print(test_name) + + # define model + model = pygpc.testfunctions.GenzOscillatory_NaN() + + # define problem + parameters = OrderedDict() + parameters["x1"] = pygpc.Beta(pdf_shape=[1., 1.], pdf_limits=[0., 1.]) + parameters["x2"] = pygpc.Beta(pdf_shape=[1., 1.], pdf_limits=[0., 1.]) + problem = pygpc.Problem(model, parameters) + + # gPC options + 
+        options = dict()
+        options["order_start"] = 2
+        options["order_end"] = 12
+        options["interaction_order"] = 2
+        options["solver"] = "LarsLasso"
+        options["settings"] = None
+        options["seed"] = 1
+        options["matrix_ratio"] = 10
+        options["n_cpu"] = 0
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["adaptive_sampling"] = False
+        options["gradient_enhanced"] = True
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["n_grid_gradient"] = 5
+        options["qoi"] = 0
+        options["error_type"] = "loocv"
+        options["eps"] = 1e-3
+        options["grid"] = pygpc.Random
+        options["grid_options"] = {"seed": seed}
+
+        # define algorithm
+        algorithm = pygpc.RegAdaptiveProjection(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC session
+        session, coeffs, results = session.run()
+
+        # read session
+        session = pygpc.read_session(fname=session.fn_session, folder=session.fn_session_folder)
+        nrmsd = session.gpc[0].error[-1] * 100
+
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
 
         print("> Checking file consistency...")
         files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
@@ -881,7 +1340,7 @@ def test_algorithms_008_MERegAdaptiveProjection_gpc(self):
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_007_MERegAdaptiveProjection_gpc'
+        test_name = 'test_algorithms_008_MERegAdaptiveProjection_gpc'
         print(test_name)
 
         # define model
@@ -910,7 +1369,7 @@ def test_algorithms_008_MERegAdaptiveProjection_gpc(self):
         options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
         options["error_type"] = "nrmsd"
         options["error_norm"] = "absolute"
-        options["n_samples_validations"] = "absolute"
+        options["n_samples_validation"] = 1e3
         options["qoi"] = 0
         options["classifier"] = "learning"
         options["classifier_options"] = {"clusterer": "KMeans",
@@ -924,7 +1383,7 @@ def test_algorithms_008_MERegAdaptiveProjection_gpc(self):
         options["fn_results"] = os.path.join(folder, test_name)
         options["save_session_format"] = save_session_format
         options["grid"] = pygpc.Random
-        options["grid_options"] = None
+        options["grid_options"] = {"seed": seed}
 
         # define algorithm
         algorithm = pygpc.MERegAdaptiveProjection(problem=problem, options=options)
@@ -974,7 +1433,78 @@ def test_algorithms_008_MERegAdaptiveProjection_gpc(self):
                                       fn_out=options["fn_results"] + ".txt")
 
         print("> Maximum NRMSD (gpc vs original): {:.2}%".format(np.max(nrmsd)*100))
-        # self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+        self.expect_true(np.max(nrmsd) < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(np.max(nrmsd)*100))
+
+        print("> Checking file consistency...")
+        files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
+        self.expect_true(files_consistent, error_msg)
+
+        print("done!\n")
+
+    def test_algorithms_008_MERegAdaptiveProjection_gpc_NaN(self):
+        """
+        Algorithm: MERegAdaptiveProjection
+        Method: Regression
+        Solver: LarsLasso
+        Grid: Random
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_algorithms_008_MERegAdaptiveProjection_gpc_NaN'
+        print(test_name)
+
+        # define model
+        model = pygpc.testfunctions.DiscontinuousRidgeManufactureDecay_NaN()
+
+        # define problem
+        parameters = OrderedDict()
+        parameters["x1"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 1])
+        parameters["x2"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[0, 1])
+        problem = pygpc.Problem(model, parameters)
+
+        # gPC options
+        options = dict()
+        options["method"] = "reg"
+        options["solver"] = "LarsLasso"
+        options["settings"] = None
+        options["order_start"] = 3
+        options["order_end"] = 15
+        options["interaction_order"] = 2
+        options["matrix_ratio"] = 2
+        options["n_cpu"] = 0
+        options["projection"] = True
+        options["adaptive_sampling"] = True
+        options["gradient_enhanced"] = True
+        options["gradient_calculation"] = "FD_fwd"
+        options["gradient_calculation_options"] = {"dx": 0.001, "distance_weight": -2}
+        options["error_type"] = "loocv"
+        options["error_norm"] = "absolute"
+        options["qoi"] = 0
+        options["classifier"] = "learning"
+        options["classifier_options"] = {"clusterer": "KMeans",
+                                         "n_clusters": 2,
+                                         "classifier": "MLPClassifier",
+                                         "classifier_solver": "lbfgs"}
+        options["n_samples_discontinuity"] = 12
+        options["eps"] = 0.75
+        options["n_grid_init"] = 100
+        options["backend"] = "omp"
+        options["fn_results"] = os.path.join(folder, test_name)
+        options["save_session_format"] = save_session_format
+        options["grid"] = pygpc.Random
+        options["grid_options"] = {"seed": seed}
+
+        # define algorithm
+        algorithm = pygpc.MERegAdaptiveProjection(problem=problem, options=options)
+
+        # Initialize gPC Session
+        session = pygpc.Session(algorithm=algorithm)
+
+        # run gPC session
+        session, coeffs, results = session.run()
+
+        nrmsd = session.gpc[0].error[0][0] * 100
+        print("> Maximum NRMSD (gpc vs original): {:.2}%".format(nrmsd))
+        self.expect_true(nrmsd < 0.1, 'gPC test failed with NRMSD error = {:1.2f}%'.format(nrmsd))
 
         print("> Checking file consistency...")
         files_consistent, error_msg = pygpc.check_file_consistency(options["fn_results"] + ".hdf5")
@@ -990,7 +1520,7 @@ def test_algorithms_009_clustering_3_domains(self):
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_022_clustering_3_domains'
+        test_name = 'test_algorithms_009_clustering_3_domains'
        print(test_name)
 
         # define model
@@ -1094,7 +1624,7 @@ def test_algorithms_010_Static_IO_gpc(self):
         Grid: Custom (Random)
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_026_Static_IO_gpc'
+        test_name = 'test_algorithms_010_Static_IO_gpc'
         print(test_name)
 
         # define input data
@@ -1175,7 +1705,7 @@ def test_algorithms_011_MEStatic_IO_gpc(self):
         Grid: Custom (Random)
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_027_MEStatic_IO_gpc'
+        test_name = 'test_algorithms_011_MEStatic_IO_gpc'
         print(test_name)
 
         # define input data
diff --git a/tests/test_grids.py b/tests/test_grids.py
index 33ae0dad..1d492dc3 100755
--- a/tests/test_grids.py
+++ b/tests/test_grids.py
@@ -76,7 +76,7 @@ def test_grids_001_quadrature_grids(self):
         Testing Grids [TensorGrid, SparseGrid]
         """
         global folder, plot
-        test_name = 'pygpc_test_010_quadrature_grids'
+        test_name = 'test_grids_001_quadrature_grids'
         print(test_name)
 
         # define testfunction
@@ -104,7 +104,7 @@ def test_grids_002_random_grid(self):
         Testing Grids [Random]
         """
         global folder, plot, seed
-        test_name = 'pygpc_test_011_random_grid'
+        test_name = 'test_grids_002_random_grid'
         print(test_name)
 
         # define testfunction
@@ -237,7 +237,7 @@ def test_grids_003_LHS_grid(self):
         Testing Grids [LHS]
         """
         global folder, plot, seed
-        test_name = 'pygpc_test_012_LHS_grid'
+        test_name = 'test_grids_003_LHS_grid'
         print(test_name)
 
         # define testfunction
@@ -390,7 +390,7 @@ def test_grids_004_L1_grid(self):
         Testing Grids [L1]
         """
         global folder, plot, seed
-        test_name = 'pygpc_test_013_L1_grid'
+        test_name = 'test_grids_004_L1_grid'
         print(test_name)
 
         # define testfunction
diff --git a/tests/test_interface.py b/tests/test_interface.py
index 6a42dcfd..1fd0ee31 100755
--- a/tests/test_interface.py
+++ b/tests/test_interface.py
@@ -79,7 +79,7 @@ def test_interface_001_Matlab_gpc(self):
         Grid: Random
         """
         global folder, plot, matlab, save_session_format
-        test_name = 'pygpc_test_020_Matlab_gpc'
+        test_name = 'test_interface_001_Matlab_gpc'
         print(test_name)
 
         if matlab:
@@ -177,7 +177,7 @@ def test_interface_002_save_and_load_session(self):
         """
         global folder, plot, save_session_format
 
-        test_name = 'pygpc_test_024_save_and_load_session'
+        test_name = 'test_interface_002_save_and_load_session'
         print(test_name)
 
         # define model
         model = pygpc.testfunctions.Peaks()
diff --git a/tests/test_postprocessing.py b/tests/test_postprocessing.py
index 244c40e5..e7aa1250 100755
--- a/tests/test_postprocessing.py
+++ b/tests/test_postprocessing.py
@@ -7,9 +7,10 @@
 import shutil
 import unittest
 import numpy as np
-
-from scipy.integrate import odeint
+import matplotlib
+matplotlib.use("Qt5Agg")
+import matplotlib.pyplot as plt
 from collections import OrderedDict
 
 # disable numpy warnings
 import warnings
@@ -79,7 +80,7 @@ def test_postprocessing_001_random_vars_postprocessing(self):
         Grid: Random
         """
         global folder, plot, save_session_format
-        test_name = 'pygpc_test_021_random_vars_postprocessing_sobol'
+        test_name = 'test_postprocessing_001_random_vars_postprocessing'
         print(test_name)
 
         # define model
@@ -176,6 +177,131 @@ def test_postprocessing_001_random_vars_postprocessing(self):
 
         print("done!\n")
 
+    def test_postprocessing_002_plot_functions(self):
+        """
+        Algorithm: RegAdaptive
+        Method: Regression
+        Solver: Moore-Penrose
+        Grid: Random
+        """
+        global folder, plot, save_session_format
+        test_name = 'test_postprocessing_002_plot_functions'
+        print(test_name)
+
+        # %%
+        # At first, we are loading the model:
+        model = pygpc.testfunctions.Lorenz_System()
+
+        # %%
+        # In the next step, we are defining the random variables (ensure that you are using an OrderedDict! Otherwise,
+        # the parameters can be mixed up during postprocessing because Python reorders the parameters in standard dictionaries!).
+        # Further details on how to define random variables can be found in the tutorial :ref:`How to define a gPC problem`.
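+        # (a Beta distribution with pdf_shape=[1, 1] is flat, i.e. each random parameter below is
+        # uniformly distributed between its pdf_limits)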
+        parameters = OrderedDict()
+        parameters["sigma"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[10 - 1, 10 + 1])
+        parameters["beta"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[28 - 10, 28 + 10])
+        parameters["rho"] = pygpc.Beta(pdf_shape=[1, 1], pdf_limits=[(8 / 3) - 1, (8 / 3) + 1])
+
+        # %%
+        # To complete the parameter definition, we will also define the deterministic parameters, which are assumed to be
+        # constant during the uncertainty and sensitivity analysis:
+        parameters["x_0"] = 1.0  # initial value for x
+        parameters["y_0"] = 1.0  # initial value for y
+        parameters["z_0"] = 1.0  # initial value for z
+        parameters["t_end"] = 5.0  # end time of simulation
+        parameters["step_size"] = 0.05  # step size for differential equation integration
+
+        # %%
+        # With the model and the parameters dictionary, the pygpc problem can be defined:
+        problem = pygpc.Problem(model, parameters)
+
+        # %%
+        # Now we are ready to define the gPC options, like expansion orders, error types, gPC matrix properties etc.:
+        fn_results = "tmp/example_lorenz"
+        options = dict()
+        options["order_start"] = 6
+        options["order_end"] = 20
+        options["solver"] = "Moore-Penrose"
+        options["interaction_order"] = 2
+        options["order_max_norm"] = 0.7
+        options["n_cpu"] = 0
+        options["error_type"] = 'nrmsd'
+        options["error_norm"] = 'absolute'
+        options["n_samples_validation"] = 1000
+        options["matrix_ratio"] = 5
+        options["fn_results"] = fn_results
+        options["eps"] = 0.01
+        options["grid_options"] = {"seed": 1}
+
+        # %%
+        # Now we choose the algorithm to conduct the gPC expansion and initialize the gPC Session:
+        algorithm = pygpc.RegAdaptive(problem=problem, options=options)
+        session = pygpc.Session(algorithm=algorithm)
+
+        # %%
+        # Finally, we are ready to run the gPC. An .hdf5 results file will be created as specified in the
+        # options["fn_results"] field of the gPC options dictionary.
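+        # The adaptive algorithm increases the expansion order step by step until the NRMSD,
+        # estimated from options["n_samples_validation"] random validation samples, falls below
+        # options["eps"] (here 0.01):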
+        session, coeffs, results = session.run()
+
+        # %%
+        # Postprocessing
+        # ^^^^^^^^^^^^^^
+        # Postprocess gPC and add sensitivity coefficients to results .hdf5 file
+        pygpc.get_sensitivities_hdf5(fn_gpc=session.fn_results,
+                                     output_idx=None,
+                                     calc_sobol=True,
+                                     calc_global_sens=True,
+                                     calc_pdf=False)
+
+        # extract sensitivity coefficients from results .hdf5 file
+        sobol, gsens = pygpc.get_sens_summary(fn_gpc=fn_results,
+                                              parameters_random=session.parameters_random,
+                                              fn_out=fn_results + "_sens_summary.txt")
+
+        # plot time course of mean together with probability density, sobol sensitivity coefficients and global derivatives
+        t = np.arange(0.0, parameters["t_end"], parameters["step_size"])
+        pygpc.plot_sens_summary(session=session,
+                                coeffs=coeffs,
+                                sobol=sobol,
+                                gsens=gsens,
+                                plot_pdf_over_output_idx=True,
+                                qois=t,
+                                mean=pygpc.SGPC.get_mean(coeffs),
+                                std=pygpc.SGPC.get_std(coeffs),
+                                x_label="t in s",
+                                y_label="x(t)",
+                                zlim=[0, 0.4],
+                                fn_plot=fn_results + "_sens_summary_test_1.pdf")
+
+        # plot time course of mean together with std, sobol sensitivity coefficients and global derivatives
+        pygpc.plot_sens_summary(session=session,
+                                coeffs=coeffs,
+                                sobol=sobol,
+                                gsens=gsens,
+                                plot_pdf_over_output_idx=False,
+                                qois=t,
+                                mean=pygpc.SGPC.get_mean(coeffs),
+                                std=pygpc.SGPC.get_std(coeffs),
+                                x_label="t in s",
+                                y_label="x(t)",
+                                fn_plot=fn_results + "_sens_summary_test_2.png")
+
+        # plot sensitivities at one time point with donut plot
+        pygpc.plot_sens_summary(sobol=sobol,
+                                gsens=gsens,
+                                output_idx=50,
+                                fn_plot=fn_results + "_sens_summary_test_3.png")
+
+        # plot probability density of output over time (qoi)
+        pygpc.plot_gpc(session=session,
+                       coeffs=coeffs,
+                       output_idx="all",
+                       zlim=[0, 0.4],
+                       plot_pdf_over_output_idx=True,
+                       qois=t,
+                       x_label="t in s",
+                       y_label="x(t)",
+                       fn_plot=fn_results + "_plot_gpc_test_1.png")
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/test_utils.py b/tests/test_utils.py
index ff854709..f6e1810d 100755
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -75,7 +75,7 @@ def test_utils_001_testfunctions(self):
         """
         Testing testfunctions (multi-threading and inherited parallelization)
         """
-        test_name = 'pygpc_test_008_testfunctions'
+        test_name = 'test_utils_001_testfunctions'
         print(test_name)
 
         tests = []
@@ -146,7 +146,7 @@ def test_utils_002_RandomParameters(self):
         Testing RandomParameters
         """
         global folder, plot
-        test_name = 'pygpc_test_009_RandomParameters'
+        test_name = 'test_utils_002_RandomParameters'
         print(test_name)
 
         parameters = OrderedDict()
@@ -177,7 +177,7 @@ def test_utils_003_backends(self):
         """
         global folder, gpu
 
-        test_name = 'pygpc_test_023_backends'
+        test_name = 'test_utils_003_backends'
         print(test_name)
 
         backends = ["python", "cpu", "omp", "cuda"]
@@ -280,7 +280,7 @@ def test_utils_004_gradient_estimation_methods(self):
         """
         global folder, plot, save_session_format
 
-        test_name = 'pygpc_test_025_gradient_estimation_methods'
+        test_name = 'test_utils_004_gradient_estimation_methods'
         print(test_name)
 
         methods_options = dict()
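
A remark on the NRMSD checks that now guard most of the tests above: each test converts the last entry of session.gpc[0].error to percent and compares it against a threshold. The sketch below illustrates the error measure; it assumes the common definition of the normalized root-mean-square deviation (RMSD divided by the range of the reference data). pygpc's internal computation may differ in detail depending on the chosen error settings, so treat this as an illustration rather than the library's implementation.

import numpy as np

def nrmsd(y_gpc, y_ref):
    # RMSD per output quantity (columns), normalized by the range of the
    # reference data and expressed in percent
    y_gpc = np.atleast_2d(y_gpc)
    y_ref = np.atleast_2d(y_ref)
    rmsd = np.sqrt(np.mean((y_gpc - y_ref) ** 2, axis=0))
    return 100 * rmsd / (np.max(y_ref, axis=0) - np.min(y_ref, axis=0))

# example: a deviation of roughly 0.7 % would pass the "nrmsd < 1" assertions above
y_ref = np.linspace(0.0, 1.0, 100).reshape(-1, 1)
y_gpc = y_ref + 0.01 * np.sin(10 * y_ref)
print(nrmsd(y_gpc, y_ref))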