Commit

Add logger

Philoso-Fish committed Jul 29, 2021
1 parent 60770f9 commit 7a46567
Showing 13 changed files with 53 additions and 22 deletions.
15 changes: 15 additions & 0 deletions carla/__init__.py
@@ -1,4 +1,19 @@
 # flake8: noqa
 # isort:skip
+import logging
+
+from ._logger import INFOFORMATTER
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+# defines the stream handler
+_ch = logging.StreamHandler()
+_ch.setLevel(logging.INFO)
+_ch.setFormatter(logging.Formatter(INFOFORMATTER))
+
+# adds the handler to the global variable: log
+log.addHandler(_ch)
+
 from ._version import __version__
 from .data import Data, DataCatalog
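With this package-level setup, downstream modules only need `from carla import log` to emit through the shared handler — exactly the pattern the rest of this commit applies. A minimal sketch of consuming the logger; the level override at the end is an assumed downstream tweak, not part of this commit:

```python
from carla import log

# Messages go to the package logger configured in carla/__init__.py and are
# rendered as bare messages via INFOFORMATTER.
log.info("Counterfactual found!")

# Assumed user-side tweak: silence INFO output from the library by raising
# the level of the "carla" logger (handlers keep their own thresholds).
import logging

logging.getLogger("carla").setLevel(logging.WARNING)
```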
5 changes: 5 additions & 0 deletions carla/_logger.py
@@ -0,0 +1,5 @@
+DEBUGFORMATTER = "%(filename)s:%(name)s:%(funcName)s:%(lineno)d: %(message)s"
+"""Debug file formatter."""
+
+INFOFORMATTER = "%(message)s"
+"""Log file and stream output formatter."""
13 changes: 7 additions & 6 deletions carla/recourse_methods/autoencoder/models.py
@@ -11,6 +11,7 @@
 from torch import optim
 from tqdm import trange
 
+from carla import log
 from carla.recourse_methods.autoencoder.dataloader import VAEDataset
 from carla.recourse_methods.autoencoder.losses import binary_crossentropy, csvae_loss
 from carla.recourse_methods.autoencoder.save_load import get_home
@@ -306,19 +307,19 @@ def fit(
 
             ELBO[epoch] = train_loss / train_loss_num
             if epoch % 10 == 0:
-                print(
+                log.info(
                     "[Epoch: {}/{}] [objective: {:.3f}]".format(
                         epoch, epochs, ELBO[epoch, 0]
                     )
                 )
 
         ELBO_train = ELBO[epoch, 0].round(2)
-        print("[ELBO train: " + str(ELBO_train) + "]")
+        log.info("[ELBO train: " + str(ELBO_train) + "]")
         del MU_X_eval, MU_Z_eval, Z_ENC_eval
         del LOG_VAR_X_eval, LOG_VAR_Z_eval
 
         self.save()
-        print("Training finished")
+        log.info("Training finished")
 
     def load(self, input_shape):
         cache_path = get_home()
@@ -541,19 +542,19 @@ def fit(self, data, lambda_reg=None, epochs=100, lr=1e-3, batch_size=32):
                 train_x_recon_losses.append(x_recon_loss_val.item())
                 train_y_recon_losses.append(y_recon_loss_val.item())
 
-            print(
+            log.info(
                 "epoch {}: x recon loss: {}".format(
                     i, np.mean(np.array(train_x_recon_losses))
                 )
             )
-            print(
+            log.info(
                 "epoch {}: y recon loss: {}".format(
                     i, np.mean(np.array(train_y_recon_losses))
                 )
             )
 
         self.save()
-        print("Training finished")
+        log.info("Training finished")
 
     def save(self):
         cache_path = get_home()
9 changes: 5 additions & 4 deletions carla/recourse_methods/catalog/actionable_recourse/model.py
@@ -5,6 +5,7 @@
 import recourse as rs
 from lime.lime_tabular import LimeTabularExplainer
 
+from carla import log
 from carla.recourse_methods.processing import encode_feature_names
 
 from ...api import RecourseMethod
@@ -184,9 +185,9 @@ def get_counterfactuals(self, factuals: pd.DataFrame) -> pd.DataFrame:
 
         # Check if we need lime to build coefficients
         if (coeffs is None) and (intercepts is None):
-            print("Start generating LIME coefficients")
+            log.info("Start generating LIME coefficients")
             coeffs, intercepts = self._get_lime_coefficients(factuals_enc_norm)
-            print("Finished generating LIME coefficients")
+            log.info("Finished generating LIME coefficients")
         else:
             # Local explanations via LIME generate coeffs and intercepts per instance, while global explanations
             # via input parameter need to be set into correct shape [num_of_instances, num_of_features]
@@ -223,12 +224,12 @@ def get_counterfactuals(self, factuals: pd.DataFrame) -> pd.DataFrame:
             try:
                 fs_pop = fs.populate(total_items=self._fs_size)
             except (ValueError, KeyError):
-                print(
+                log.info(
                     "Actionable Recourse is not able to produce a counterfactual explanation for instance {}".format(
                         index
                     )
                 )
-                print(row.values)
+                log.info(row.values)
                 cfs.append(counterfactual)
                 continue
 
@@ -5,6 +5,7 @@
 from torch.distributions import kl_divergence
 from torch.distributions.normal import Normal
 
+from carla import log
 from carla.recourse_methods.catalog.clue.library.clue_ml.src.gauss_cat import *
 from carla.recourse_methods.catalog.clue.library.clue_ml.src.probability import (
     normal_parse_params,
@@ -140,7 +141,7 @@ def create_net(self):
         if self.cuda:
             self.model = self.model.cuda()
             cudnn.benchmark = True
-        print(" Total params: %.2fM" % (self.get_nb_parameters() / 1000000.0))
+        log.info(" Total params: %.2fM" % (self.get_nb_parameters() / 1000000.0))
 
     def create_opt(self):
         self.optimizer = RAdam(self.model.parameters(), lr=self.lr)
@@ -3,6 +3,7 @@
 import torch.nn.functional as F
 from torch.optim import Adam
 
+from carla import log
 from carla.recourse_methods.catalog.clue.library.clue_ml.src.probability import (
     decompose_entropy_cat,
     decompose_std_gauss,
@@ -445,7 +446,7 @@ def update_stopvec(cost_vec, it_mask, step_idx, n_early_stop, min_steps):
         it_mask[to_mask == 1] = step_idx
 
         if (it_mask == 0).sum() == 0 and n_early_stop > 0:
-            print("it %d, all conditions met, stopping" % step_idx)
+            log.info("it %d, all conditions met, stopping" % step_idx)
         return it_mask
 
     @staticmethod
@@ -5,6 +5,7 @@
 import torch
 from torch.autograd import Variable
 
+from carla import log
 from carla.visualisation import cprint
 
 try:
@@ -100,7 +101,7 @@ def update_lr(self, epoch, gamma=0.99):
         if self.schedule is not None:
             if len(self.schedule) == 0 or epoch in self.schedule:
                 self.lr *= gamma
-                print("learning rate: %f (%d)\n" % (self.lr, epoch))
+                log.info("learning rate: %f (%d)\n" % (self.lr, epoch))
                 for param_group in self.optimizer.param_groups:
                     param_group["lr"] = self.lr
 
@@ -123,7 +124,7 @@ def load(self, filename):
         self.lr = state_dict["lr"]
         self.model = state_dict["model"]
         self.optimizer = state_dict["optimizer"]
-        print(" restoring epoch: %d, lr: %f" % (self.epoch, self.lr))
+        log.info(" restoring epoch: %d, lr: %f" % (self.epoch, self.lr))
         return self.epoch
 
 
2 changes: 1 addition & 1 deletion carla/recourse_methods/catalog/crud/library/crud.py
@@ -103,6 +103,6 @@ def counterfactual_search(
     np_counterfactuals = torch_counterfactuals.cpu().detach().numpy()
     np_distances = torch_distances.cpu().detach().numpy()
     index = np.argmin(np_distances)
-    print("Coutnerfactual found")
+    print("Counterfactual found")
 
     return np_counterfactuals[index].squeeze(axis=0)
@@ -2,6 +2,8 @@
 import pandas as pd
 from numpy import linalg as LA
 
+from carla import log
+
 
 def hyper_sphere_coordindates(n_search_samples, instance, high, low, p_norm=2):
 
@@ -137,7 +139,7 @@ def growing_spheres_search(
             (candidate_counterfactuals.values - instance_replicated)
         ).sum(axis=1)
     else:
-        print("Distance not defined yet")
+        log.info("Distance not defined yet")
 
     # counterfactual labels
     y_candidate_logits = model.predict_proba(candidate_counterfactuals.values)
5 changes: 3 additions & 2 deletions carla/recourse_methods/catalog/revise/model.py
@@ -5,6 +5,7 @@
 import torch
 from torch import nn
 
+from carla import log
 from carla.data.api import Data
 from carla.models.api import MLModel
 from carla.recourse_methods.api import RecourseMethod
@@ -216,14 +217,14 @@ def _counterfactual_optimization(self, cat_features_indices, device, df_fact):
 
             # Choose the nearest counterfactual
            if len(candidate_counterfactuals):
-                print("Counterfactual found!")
+                log.info("Counterfactual found!")
                 array_counterfactuals = np.array(candidate_counterfactuals)
                 array_distances = np.array(candidate_distances)
 
                 index = np.argmin(array_distances)
                 list_cfs.append(array_counterfactuals[index])
             else:
-                print("No counterfactual found")
+                log.info("No counterfactual found")
                 list_cfs.append(query_instance.cpu().detach().numpy().squeeze(axis=0))
         return list_cfs
 
5 changes: 3 additions & 2 deletions carla/recourse_methods/catalog/wachter/library/wachter.py
@@ -7,6 +7,7 @@
 from torch import nn
 from torch.autograd import Variable
 
+from carla import log
 from carla.recourse_methods.processing import reconstruct_encoding_constraints
 
 DECISION_THRESHOLD = 0.5
@@ -108,8 +109,8 @@ def wachter_recourse(
                 lamb -= 0.05
 
             if datetime.datetime.now() - t0 > t_max:
-                print("Timeout - No Counterfactual Explanation Found")
+                log.info("Timeout - No Counterfactual Explanation Found")
                 break
             elif f_x_new >= 0.5:
-                print("Counterfactual Explanation Found")
+                log.info("Counterfactual Explanation Found")
                 return x_new_enc.cpu().detach().numpy().squeeze(axis=0)
4 changes: 3 additions & 1 deletion carla/visualisation/output.py
@@ -1,5 +1,7 @@
 import sys
 
+from carla import log
+
 
 def cprint(color, text, **kwargs):
     if color[0] == "*":
@@ -17,5 +19,5 @@ def cprint(color, text, **kwargs):
         "c": "36",
         "w": "37",
     }
-    print("\x1b[%s%sm%s\x1b[0m" % (pre_code, code[color], text), **kwargs)
+    log.info("\x1b[%s%sm%s\x1b[0m" % (pre_code, code[color], text), **kwargs)
     sys.stdout.flush()
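One caveat with routing `cprint` through the logger: `Logger.info` only accepts logging-specific keyword arguments (`exc_info`, `stack_info`, `stacklevel`, `extra`), so any print-style kwargs such as `end=` forwarded via `**kwargs` would now raise a `TypeError`. A usage sketch under the signature shown above — the `"c"`/`"w"` keys come from the visible mapping, and reading a leading `"*"` as a bold/bright variant is an inference from the `color[0] == "*"` branch:

```python
from carla.visualisation import cprint

cprint("c", "status update")   # cyan, ANSI code 36 per the mapping above
cprint("w", "plain message")   # white, ANSI code 37
cprint("*c", "emphasised")     # assumed: leading "*" selects the bold variant
```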
2 changes: 1 addition & 1 deletion test/test_cfmodel.py
@@ -180,7 +180,7 @@ def test_face_get_counterfactuals(model_type):
     test_factual = factuals.iloc[:2]
 
     # Test for knn mode
-    hyperparams = {"mode": "knn", "fraction": 0.10}
+    hyperparams = {"mode": "knn", "fraction": 0.05}
     face = Face(model_tf, hyperparams)
     df_cfs = face.get_counterfactuals(test_factual)
 
