ETR.py
import sys

import numpy as np
import pandas as pd
import sklearn.metrics
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, Normalizer
from tpot.builtins import StackingEstimator, ZeroCount
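
# TPOT-exported regression pipeline: reads a CSV of predictor columns plus a
# log10-transformed flux target, fits an Extra Trees pipeline on a training
# split, and reports hold-out error metrics.
# Usage:
#     python ETR.py path/to/data.csv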
# Path to the input CSV is passed as the first command-line argument.
csv_path = sys.argv[1]
data = pd.read_csv(csv_path, sep=',', encoding='ISO-8859-1')
# Predictor columns to keep, and the log10-transformed flux target column.
features_saved = ['Chla_center', 'Tw_center', 'Kd_center', 'PAR_center', 'Chla*Tw', 'Log(Chla*Tw)', 'Chla*Kd', 'TW*Kd', 'log10(Tw*Kd)', 'Kd*Par', 'WS_m/s']
target = 'Log10Fm_μmol/m2/d'

pd_target = data[target]
pd_features = data.drop(columns=[target])

# Drop every column that is not in the selected feature set.
for col in list(pd_features.columns.values):
    if col not in features_saved:
        pd_features = pd_features.drop(columns=[col])
# Hold out a test set (default 75/25 split) with a fixed seed for reproducibility.
training_features, testing_features, training_target, testing_target = train_test_split(pd_features, pd_target, random_state=42)
# Average CV score on the training set was: -0.3487751537281346
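# Stage-by-stage sketch of the exported pipeline (descriptions are editorial,
# based on the scikit-learn / TPOT docs, not the original author's notes):
#   MinMaxScaler               - rescale each feature to [0, 1]
#   Normalizer(norm="l1")      - rescale each row to unit L1 norm
#   ZeroCount                  - append per-row counts of zero/non-zero entries
#   StackingEstimator(RidgeCV) - append the ridge model's predictions as a feature
#   PolynomialCountSketch      - approximate a degree-4 polynomial kernel map
#   ExtraTreesRegressor        - final bagged extremely-randomised trees model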
exported_pipeline = make_pipeline(
    MinMaxScaler(),
    Normalizer(norm="l1"),
    ZeroCount(),
    StackingEstimator(estimator=RidgeCV()),
    PolynomialCountSketch(degree=4),
    ExtraTreesRegressor(bootstrap=True, max_features=0.85, min_samples_leaf=2, min_samples_split=3, n_estimators=100)
)
# Fit on the training split and evaluate on the held-out test split.
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)

# Compute RMSE via the square root of MSE; the `squared=False` argument to
# mean_squared_error has been removed in newer scikit-learn versions.
mse = sklearn.metrics.mean_squared_error(testing_target, results)
print("MSE:", round(mse, 4))
print("RMSE:", round(float(np.sqrt(mse)), 4))
print("MAE:", round(sklearn.metrics.mean_absolute_error(testing_target, results), 4))
print("R2:", round(sklearn.metrics.r2_score(testing_target, results), 4))
print("Difference between the true and predicted values:")
print(testing_target.values - results)
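
# Optional follow-up (an editorial sketch, not part of the original script):
# tabulate observed vs. predicted values so the residuals are easier to scan.
comparison = pd.DataFrame({
    'observed': testing_target.values,
    'predicted': results,
    'difference': testing_target.values - results,
})
print(comparison.describe())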