-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathrun_chaotic.py
158 lines (131 loc) · 6.45 KB
/
run_chaotic.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
#check
import os
import gc
import torch
import numpy as np
import time
from dysts.flows import *
from pysindy import SmoothedFiniteDifference
from utils.data import add_noise
from utils.log_ import create_dir_if_not_exist, save_pareto_frontier_to_csv
from model.regressor import PSRN_Regressor
import click
@click.command()
@click.option('--gpu_index', '-g', default=0, type=int, help='gpu index used')
@click.option('--n_runs', '-r', default=50, type=int, help='number of runs for each puzzle')
def main(gpu_index, n_runs):
    """Run PSRN symbolic-regression searches on chaotic flows from `dysts`.

    For each random seed (``n_runs`` of them) and each system named in
    ``./data/dystsnames/dysts_16_flows.txt``: generate a noisy trajectory,
    estimate per-variable time derivatives with a smoothed finite
    difference, then fit one PSRN regressor per state variable, saving the
    Pareto front and wall-clock time under ``./log/chaotic/<system>/<var>/``.

    Parameters
    ----------
    gpu_index : int
        CUDA device index exposed via ``CUDA_VISIBLE_DEVICES``.
    n_runs : int
        Number of random seeds tried per system.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_index)

    # Read the list of system class names; use `with` so the handle is
    # closed instead of leaked (the original left the file open).
    with open('./data/dystsnames/dysts_16_flows.txt') as f:
        class_name_ls = [line.strip() for line in f.readlines()]
    print(class_name_ls)

    # Number of random seeds for each chaotic dynamic
    n_seeds = n_runs

    # Chaotic dynamic data generation
    sample_size = 1000
    pts_per_period = 100
    noise_level = 0.01
    down_sample = 200

    # PTS settings
    ops = ['Add', 'Mul', 'SemiSub', 'SemiDiv',
           'Identity', 'Sin', 'Cos', 'Exp',
           'Log', 'Tanh', 'Cosh', 'Abs', 'Sign']
    N_SIMU = 3
    top_k = 30
    postprocess = False

    for seed in range(n_seeds):
        for class_name in class_name_ls:
            # SECURITY: the original used eval(class_name) on file content.
            # A globals() lookup resolves only names already star-imported
            # from dysts.flows, so arbitrary expressions cannot execute.
            dysts_model = globals()[class_name]()

            # Convert forced (non-autonomous) systems to autonomous ones
            # by zeroing their forcing amplitude.
            if class_name == 'ForcedBrusselator':
                dysts_model.f = 0
            elif class_name == 'Duffing':
                dysts_model.gamma = 0
            elif class_name == 'ForcedVanDerPol':
                dysts_model.a = 0
            elif class_name == 'ForcedFitzHughNagumo':
                dysts_model.f = 0

            # clear PSRN's VRAM memory
            gc.collect()
            np.random.seed(seed)  # setting random seed
            try:
                print('=== START SEARCHING FOR GT MODEL: ===')
                print(dysts_model)
                # Gen traj data using `dysts` library (noise added manually below)
                t, data = dysts_model.make_trajectory(sample_size,
                                                      return_times=True,
                                                      pts_per_period=pts_per_period,
                                                      resample=True,
                                                      noise=0,
                                                      postprocess=postprocess)
                # introduce gaussian noise, one state variable at a time
                for k in range(data.shape[1]):
                    data[:, k:k+1] = add_noise(data[:, k:k+1], noise_level, seed)

                # Make derivation for each variable
                t = t.flatten()
                dim = data.shape[1]
                print('dim', dim)
                if dim == 3:
                    # renamed from `vars` to avoid shadowing the builtin
                    var_names = ['x', 'y', 'z']
                    dotsvarnames = ['xdot', 'ydot', 'zdot']
                    deriv_idxs = [0, 1, 2]
                    trying_const_num = 2
                    n_inputs = 5
                elif dim == 4:
                    var_names = ['x', 'y', 'z', 'w']
                    dotsvarnames = ['xdot', 'ydot', 'zdot', 'wdot']
                    deriv_idxs = [0, 1, 2, 3]
                    trying_const_num = 1
                    n_inputs = 5
                else:
                    # only 3- and 4-dimensional systems are supported
                    continue

                sfd = SmoothedFiniteDifference(smoother_kws={'window_length': 5})
                data_deriv = np.zeros((data.shape[0], len(deriv_idxs)))
                for i, idx in enumerate(deriv_idxs):
                    print(data[:, idx:idx+1].shape, t.shape)
                    data_deriv[:, i:i+1] = sfd._differentiate(data[:, idx:idx+1], t)

                for idxdot, vardotname in enumerate(dotsvarnames):
                    p = './log/chaotic/{}/{}/'.format(class_name, vardotname)
                    # DequanLi's xdot is harder; give it more simulation steps.
                    if class_name == 'DequanLi' and vardotname == 'xdot':
                        simu = N_SIMU * 4
                    else:
                        simu = N_SIMU

                    pf_path = p + 'pf_{}.csv'.format(seed)
                    if os.path.exists(pf_path):
                        # BUGFIX: the original chained .format() twice on one
                        # template ('exist {}, skip.'.format(path).format(seed)),
                        # which only worked because the path itself still
                        # contained '{}'. Format the seed into the name first.
                        print('exist {}, skip.'.format(pf_path))
                        continue

                    Input = torch.from_numpy(data).to(torch.float32).to('cuda')
                    Output = torch.from_numpy(data_deriv[:, idxdot:idxdot+1]).to(torch.float32).to('cuda')
                    qsrn_regressor = PSRN_Regressor(variables=var_names,
                                                    operators=ops,
                                                    n_symbol_layers=3,
                                                    n_inputs=n_inputs,
                                                    use_const=True,
                                                    trying_const_num=trying_const_num,
                                                    trying_const_range=[0, 3],
                                                    trying_const_n_try=1,
                                                    device='cuda',
                                                    )
                    print(Input.shape, Output.shape)
                    start_time = time.time()
                    flag, pareto = qsrn_regressor.fit(Input,
                                                      Output,
                                                      n_down_sample=down_sample,
                                                      n_step_simulation=simu,
                                                      use_threshold=False,
                                                      real_time_display_freq=5,
                                                      prun_ndigit=2,
                                                      top_k=top_k,
                                                      )
                    time_cost = time.time() - start_time
                    print('time_cost', time_cost)

                    create_dir_if_not_exist(p)
                    with open(p + 'time.txt', 'a') as f:
                        f.write(str(time_cost) + '\n')
                    save_pareto_frontier_to_csv(pf_path, pareto_ls=pareto)
            except np.linalg.LinAlgError:
                # Known sporadic LAPACK/MKL failure on some Intel builds;
                # skip this seed/system combination and keep going.
                print('np.linalg.LinAlgError (Intel bug)')
                continue
# Script entry point: run the click CLI command when executed directly.
if __name__ == '__main__':
    main()