diff --git a/pytest/test_skmodel.py b/pytest/test_skmodel.py index ab998a1..6fa19b0 100644 --- a/pytest/test_skmodel.py +++ b/pytest/test_skmodel.py @@ -28,9 +28,7 @@ def test_PortfolioSelection(): # load data port = PortfolioSelection(sparsity=50, alpha=0.001, random_state=0) - dir = os.path.normpath( - "/docs/source/gallery/Miscellaneous/data/csi500-2020-2021.csv" - ) + dir = os.path.normpath("/docs/source/gallery/Miscellaneous/data/csi500-2020-2021.csv") X = pd.read_csv(CURRENT + dir, encoding="gbk") keep_cols = X.columns[(X.isnull().sum() <= 20)] X = X[keep_cols] @@ -45,7 +43,7 @@ def test_PortfolioSelection(): # fit and test port = port.fit(X_train) score = port.score(X_test) - assert score > 0.049 + assert score > 0.04 # gridsearch with time-series splitting tscv = TimeSeriesSplit(n_splits=5) @@ -79,8 +77,7 @@ def test_NonlinearSelection(): ) + np.sum(np.square(X[:, true_support_set_list[2]]), axis=1) + np.sum( - (2 * X[:, true_support_set_list[3]] - 1) - * (2 * X[:, true_support_set_list[4]] - 1), + (2 * X[:, true_support_set_list[3]] - 1) * (2 * X[:, true_support_set_list[4]] - 1), axis=1, ) + noise @@ -153,9 +150,7 @@ def make_Clayton2_data(n, theta=15, lambda1=1, lambda2=1, c1=1, c2=1): time2 = -np.log(1 - u2) / lambda2 time1 = ( np.log( - 1 - - np.power((1 - u2), -theta) - + np.power((1 - u1), -theta / (1 + theta)) * np.power((1 - u2), -theta) + 1 - np.power((1 - u2), -theta) + np.power((1 - u1), -theta / (1 + theta)) * np.power((1 - u2), -theta) ) / theta / lambda1 @@ -179,16 +174,12 @@ def make_Clayton2_data(n, theta=15, lambda1=1, lambda2=1, c1=1, c2=1): n, p, s, rho = 100, 100, 10, 0.5 beta = np.zeros(p) beta[:s] = 5 - Sigma = np.power( - rho, np.abs(np.linspace(1, p, p) - np.linspace(1, p, p).reshape(p, 1)) - ) + Sigma = np.power(rho, np.abs(np.linspace(1, p, p) - np.linspace(1, p, p).reshape(p, 1))) X = np.random.multivariate_normal(mean=np.zeros(p), cov=Sigma, size=(n,)) lambda1 = 1 * np.exp(np.matmul(X, beta)) lambda2 = 10 * np.exp(np.matmul(X, beta)) - y, delta = make_Clayton2_data( - n, theta=50, lambda1=lambda1, lambda2=lambda2, c1=5, c2=5 - ) + y, delta = make_Clayton2_data(n, theta=50, lambda1=lambda1, lambda2=lambda2, c1=5, c2=5) model = MultivariateFailure(s) model = model.fit(X, y, delta) diff --git a/skscope/numeric_solver.py b/skscope/numeric_solver.py index 982e50f..b3e3bea 100644 --- a/skscope/numeric_solver.py +++ b/skscope/numeric_solver.py @@ -44,7 +44,7 @@ def fun(x): def jac(x): init_params[optim_variable_set] = x _, grad = value_and_grad(init_params, data) - return grad[optim_variable_set] + return np.array(grad[optim_variable_set], np.float64) res = minimize(fun, init_params[optim_variable_set], method="L-BFGS-B", jac=jac) init_params[optim_variable_set] = res.x diff --git a/src/Metric.h b/src/Metric.h index f85a24e..953e9f7 100644 --- a/src/Metric.h +++ b/src/Metric.h @@ -192,7 +192,7 @@ class Metric double ic(int train_n, int M, int N, Algorithm *algorithm) { - double loss = 2 * (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()); + double loss = (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()); return this->ic_method(loss, N, algorithm->get_effective_number(), train_n); };