From 11b48f4a614875a123cb8b8b6c6bbce4a636157d Mon Sep 17 00:00:00 2001 From: Julian Slane Date: Tue, 8 Oct 2024 16:01:42 -0700 Subject: [PATCH 1/2] Add help tab to toolbar --- constrain/app/app.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/constrain/app/app.py b/constrain/app/app.py index 45004f95..64e1a4e1 100644 --- a/constrain/app/app.py +++ b/constrain/app/app.py @@ -16,8 +16,8 @@ QFileDialog, QMessageBox, ) -from PyQt6.QtCore import Qt, QRectF -from PyQt6.QtGui import QAction, QPixmap, QPainter, QColor +from PyQt6.QtCore import Qt, QRectF, QUrl +from PyQt6.QtGui import QAction, QPixmap, QPainter, QColor, QDesktopServices from constrain.app.import_form import ImportForm from constrain.app.meta_form import MetaForm @@ -140,8 +140,26 @@ def initialize_toolbar(self): settings_menu.addMenu(popup_settings_menu) + help_menu = QMenu("Help", self) + + open_github_action = QAction("GitHub Repository", self) + open_github_action.triggered.connect(self.open_github) + + open_docs_action = QAction("Documentation", self) + open_docs_action.triggered.connect(self.open_docs) + + help_menu.addAction(open_github_action) + help_menu.addAction(open_docs_action) + toolbar.addAction(file_menu.menuAction()) toolbar.addAction(settings_menu.menuAction()) + toolbar.addAction(help_menu.menuAction()) + + def open_docs(self): + QDesktopServices.openUrl(QUrl("https://pnnl.github.io/ConStrain/index.html")) + + def open_github(self): + QDesktopServices.openUrl(QUrl("https://github.com/pnnl/ConStrain")) def basicPopupSetting(self): self.states_form.setting = "basic" From d31a5378a30b04dc1f58e17839e859bfe6bcfa6f Mon Sep 17 00:00:00 2001 From: Julian Slane Date: Wed, 9 Oct 2024 15:16:36 -0700 Subject: [PATCH 2/2] Add tooltips, fix doc formatting --- constrain/api/data_processing.py | 6 +- constrain/api/reporting.py | 6 +- constrain/api/verification.py | 7 +- constrain/api/verification_case.py | 84 ++++- constrain/api/verification_library.py | 16 +- constrain/app/app.py | 11 + constrain/app/extract.py | 193 ++++++++++++ constrain/app/list_and_choice_popups.py | 2 +- constrain/app/popup_window.py | 19 +- constrain/app/schema.json | 402 ++++++++++++++++++++++++ 10 files changed, 707 insertions(+), 39 deletions(-) create mode 100644 constrain/app/extract.py create mode 100644 constrain/app/schema.json diff --git a/constrain/api/data_processing.py b/constrain/api/data_processing.py index 8e01fdca..942fda6c 100644 --- a/constrain/api/data_processing.py +++ b/constrain/api/data_processing.py @@ -26,9 +26,9 @@ def __init__( """Instantiate a data processing object to load datasets and manipulate data before feeding it to the verification process. Args: - data (str): Path to the data (CSV format) to be loaded for processing. - data_source (str): Data source name. Use `EnergyPlus` or `Other`. - timestamp_column_name (str): Name of the column header that contains the time series timestamps. + data (str, optional): Path to the data (CSV format) to be loaded for processing. + data_source (str, optional): Data source name. Use `EnergyPlus` or `Other`. + timestamp_column_name (str, optional): Name of the column header that contains the time series timestamps. 
""" self.data = None diff --git a/constrain/api/reporting.py b/constrain/api/reporting.py index 63d1bed4..9eb5e07d 100644 --- a/constrain/api/reporting.py +++ b/constrain/api/reporting.py @@ -23,9 +23,9 @@ def __init__( ) -> None: """ Args: - verification_json (str): Path to the result json files after verifications to be loaded for reporting. It can be one JSON file or wildcard for multiple JSON files (e.g., *_md.json). - result_md_name (str): Name of the report summary markdown to be saved. All md reports will be created in the same directory as the verification result json files. - report_format (str): File format to be output. For now, only `markdown` format is available. More formats (e.g., html, pdf, csv, etc.) will be added in future releases. + verification_json (str, optional): Path to the result json files after verifications to be loaded for reporting. It can be one JSON file or wildcard for multiple JSON files (e.g., *_md.json). + result_md_name (str, optional): Name of the report summary markdown to be saved. All md reports will be created in the same directory as the verification result json files. + report_format (str, optional): File format to be output. For now, only `markdown` format is available. More formats (e.g., html, pdf, csv, etc.) will be added in future releases. """ # TODO: diff --git a/constrain/api/verification.py b/constrain/api/verification.py index df34ba8e..a211c2f5 100644 --- a/constrain/api/verification.py +++ b/constrain/api/verification.py @@ -19,6 +19,11 @@ class Verification: def __init__(self, verifications: VerificationCase = None): + """Instantiate a Verification object. + + Args: + verficiations (VerificationCase, optional): a VerificationCase + """ self.lib_classes_py_file = None self.preprocessed_data = None self.cases = None @@ -147,7 +152,7 @@ def run_single_verification(self, case: dict = None) -> None: """Run a single verification and generate a json file containing markdown report string and other results info. Args: - case (dict): Verification case dictionary. + case (Dict): Verification case dictionary. """ # Input validation if case is None: diff --git a/constrain/api/verification_case.py b/constrain/api/verification_case.py index ce72bfd4..c4daff8d 100644 --- a/constrain/api/verification_case.py +++ b/constrain/api/verification_case.py @@ -16,8 +16,8 @@ def __init__(self, cases: List = None, json_case_path: str = None) -> None: """Instantiate a verification case class object and load verification case(s) in `self.case_suite` as a Dict. keys are automatically generated unique id of cases, values are the fully defined verification case Dict. If any argument is invalid, the object instantion will report an error message. Args: - cases: (optional) A list of Dict. dictionary that includes verification case(s). - json_case_path: (optional) str. path to the verification case file. If the path ends with `*.json`, then the items in the JSON file are loaded. If the path points to a directory, then verification cases JSON files are loaded. + cases (List, optional): A list of Dict. dictionary that includes verification case(s). + json_case_path (str, optional): Path to the verification case file. If the path ends with `*.json`, then the items in the JSON file are loaded. If the path points to a directory, then verification cases JSON files are loaded. """ self.case_suite = {} @@ -56,10 +56,10 @@ def load_verification_cases_from_json( """Add verification cases from specified json file into self.case_suite. 
Cases that have already been loaded are ignored. Args: - json_case_path: str, path to the json file containing fully defined verification cases. + json_case_path (str): path to the json file containing fully defined verification cases. Returns: - List, unique ids of verification cases loaded in self.case_suite + List: unique ids of verification cases loaded in self.case_suite """ # check `json_case_path` type @@ -80,8 +80,8 @@ def save_case_suite_to_json( """Save verification cases to a dedicated file. If the `case_ids` argument is empty, all the cases in `self.case_suite` is saved. If `case_ids` includes specific cases' hash, only the hashes in the list are saved. Args: - json_path: str. path to the json file to save the cases. - case_ids: (optional) List. Unique ids of verification cases to save. By default, save all cases in `self.case_suite`. Default to an empty list. + json_path (str): Path to the json file to save the cases. + case_ids (List, optional): Unique ids of verification cases to save. By default, save all cases in `self.case_suite`. Default to an empty list. """ if case_ids is None: @@ -118,12 +118,12 @@ def create_verification_case_suite_from_base_case( """Create slightly different multiple verification cases by changing keys and values as specified in `update_key_value`. if `keep_base_case` is set to True, the `base_case` is added to the first element in the returned list. Args: - base_case: Dict. base verification input information. - update_key_value: Dict. the same format as the `base_case` arg, but the updating fields consist of a list of values to be populated with. - keep_base_case: (optional) bool. whether to keep the base case in returned list of verification cases. Default to False. + base_case (Dict): base verification input information. + update_key_value (Dict): the same format as the `base_case` arg, but the updating fields consist of a list of values to be populated with. + keep_base_case (bool, optional): whether to keep the base case in returned list of verification cases. Default to False. Returns: - List, A list of Dict, each dict is a generated case from the base case. + List: A list of Dict, each dict is a generated case from the base case. """ # return all the updating value lists' length @@ -221,11 +221,11 @@ def validate_verification_case_structure( """Validate verification case structure (e.g., check whether `run_simulation`, `simulation_IO`, etc. exist or not). Check if required key / values pairs exist in the case. check if datatype of values are appropriate, e.g. file path is str. Args: - case: dict. case information that will be validated. - verbose: bool. whether to output verbose information. Default to False. + case (Dict): case information that will be validated. + verbose (bool): whether to output verbose information. Default to False. Returns: - Bool, indicating whether the case structure is valid or not. + bool: indicating whether the case structure is valid or not. """ def _validate_case_structure_helper(schema, instance, verbose) -> Union[bool]: @@ -320,8 +320,8 @@ def save_verification_cases_to_json( """Save verification cases to a dedicated file. The cases list consists of verification case dicts. Args: - json_path: str. json file path to save the cases. - cases: List. List of complete verification cases Dictionary to save. + json_path (str): json file path to save the cases. + cases (List): List of complete verification cases Dictionary to save. 
""" # check `json_path` type if not isinstance(json_path, str): @@ -350,6 +350,14 @@ def save_verification_cases_to_json( json.dump(case_suite_in_template_format, fw, indent=4) def read_case(self, file_name: str) -> List: + """Reads cases from file + + Args: + file_name (str): file name to read from + + Returns: + List: cases + """ # load the cases from file_path with open(file_name, "r") as f: loaded_cases = json.load(f) @@ -367,6 +375,16 @@ def read_case(self, file_name: str) -> List: @staticmethod def same_case(case_a: {}, case_b: {}, ignored_keys=["case_id_in_suite"]) -> bool: + """Returns whether two cases have the same items, ignoring a given list of keys + + Args: + case_a (Dict): First case in check + case_b (Dict): Second case in check + ignored_keys (List, optional): keys to ignore, defaults to ["case_id_in_suite"] + + Returns: + bool: True if cases are the same, False otherwise + """ case_a_new = {k: v for k, v in case_a.items() if k not in ignored_keys} case_b_new = {k: v for k, v in case_b.items() if k not in ignored_keys} return case_a_new == case_b_new @@ -374,6 +392,15 @@ def same_case(case_a: {}, case_b: {}, ignored_keys=["case_id_in_suite"]) -> bool def case_already_in_suite( self, case: {}, ignored_keys=["case_id_in_suite"] ) -> bool: + """Returns whether or not case is already in case suite, ignoring a given list of keys + + Args: + case (Dict): case in question + ignored_keys (List, optional): keys to ignore, defaults to ["case_id_in_suite"] + + Returns: + bool: True if case is in suite, False otherwise + """ for k, v in self.case_suite.items(): if self.same_case(case, v, ignored_keys=ignored_keys): return True @@ -381,12 +408,30 @@ def case_already_in_suite( @staticmethod def check_json_path_type(json_path: str) -> bool: + """Checks whether path is a json file + + Args: + json_path (str): path to check + + Returns: + bool: True if path is a .json, False otherwise + """ return True if json_path[-5:] == ".json" else False @staticmethod def check_type( var_name: str, var_value: Union[str, list, dict], var_type: type ) -> bool: + """Returns whether value is of a given type + + Args: + var_name (str): name of variable + var_value (str, List, Dict): value of variable + var_type (type): type of variable + + Returns: + bool: True if value is of given type, False otherwise + """ if var_value is None: # no error msg if None return False @@ -400,6 +445,15 @@ def check_type( @staticmethod def check_file(file_path_name: str, file_path: str) -> bool: + """Checks whether file exists + + Args: + file_path_name (str): name of file path + file_path (str): file path + + Returns: + bool: True if file path exists, False otherwise + """ if os.path.isfile(file_path): return True else: diff --git a/constrain/api/verification_library.py b/constrain/api/verification_library.py index 5ef2d3e3..d0ceff6d 100644 --- a/constrain/api/verification_library.py +++ b/constrain/api/verification_library.py @@ -120,10 +120,10 @@ def validate_library(self, items: List[str] = None) -> Dict: """Check the validity of library items definition. This validity check includes checking the completeness of json specification (against library json schema) and Python verification class definition (against library class interface) and the match between the json and python implementation. Args: - items: list of str, default []. Library items to validate. `items` must be filled with valid verification item(s). If not, an error occurs. + items (List): Library items to validate. 
`items` must be filled with valid verification item(s). If not, an error occurs. Defaults to empty list.

         Returns:
-            Dict that contains validity information of library items.
+            Dict: Dict that contains validity information of library items.
         """

         # check `items` type
@@ -191,10 +191,10 @@ def get_library_items(self, items: List[str] = []) -> Union[List, None]:
         """Get the json definition and meta information of a list of specific library items.

         Args:
-            items: list of str, default []. Library items to get. By default, get all library items loaded at instantiation.
+            items (List, optional): Library items to get. By default, get all library items loaded at instantiation. Defaults to empty list.

         Returns:
-            list of `Dict` with four specific keys:
+            List: list of `Dict` with four specific keys:
             - `library_item_name`: unique str name of the library item.
             - `library_json`: library item json definition in the library json file.
             - `library_json_path`: path of the library json file that contains this library item.
@@ -226,10 +226,10 @@ def get_applicable_library_items_by_datapoints(
         """Based on provided datapoints lists, identify potentially applicable library items from all loaded items. Use this function with caution as it 1) requires aligned data points naming across all library items; 2) does not check the topological relationships between datapoints.

         Args:
-            datapoints: list of str datapoints names.
+            datapoints (List): Datapoint names.

         Returns:
-            Dict with keys being the library item names and values being the required datapoints for the corresponding keys.
+            Dict: Dict with keys being the library item names and values being the required datapoints for the corresponding keys.
         """

         # check `datapoints` type
@@ -271,10 +271,10 @@ def get_required_datapoints_by_library_items(
         """Summarize datapoints that need to be used to support specified library items. Use this function with caution as it 1) requires aligned data points naming across all library items; 2) does not check the topological relationships between datapoints.

         Args:
-            items: list of str, default []. Library items to summarize datapoints from. By default, summarize all library items loaded at instantiation.
+            items (List, optional): Library items to summarize datapoints from. By default, summarize all library items loaded at instantiation. Defaults to empty list.

         Returns:
-            Dict with keys being the datapoint name and values being a sub Dict with the following keys:
+            Dict: Dict with keys being the datapoint name and values being a sub Dict with the following keys:
             - number_of_items_using_this_datapoint: int, number of library items that use this datapoint.
             - library_items_list: List, of library item names that use this datapoint.
         """
diff --git a/constrain/app/app.py b/constrain/app/app.py
index 64e1a4e1..557d8b9d 100644
--- a/constrain/app/app.py
+++ b/constrain/app/app.py
@@ -52,6 +52,17 @@ def initialize_ui(self):
         )
         self.column_list.setCurrentItem(self.column_list.item(0))

+        meta_item = self.column_list.item(0)
+        meta_item.setToolTip("Metadata about the workflow")
+
+        import_item = self.column_list.item(1)
+        import_item.setToolTip("Python package imports needed to run the workflow")
+
+        state_item = self.column_list.item(2)
+        state_item.setToolTip(
+            "Sequential steps to follow to perform the verification. A 'state' can either be a 'MethodCall', which represents a call to one of ConStrain’s APIs, or a 'Choice', which defines alternative steps in the workflow based on a result (results are referred to as payloads in a workflow)."
+        )
+
         # make and reposition frame containing meta, imports, and state
         self.column_frame = QFrame()
         self.column_frame.setFrameStyle(QFrame.Shape.NoFrame)
diff --git a/constrain/app/extract.py b/constrain/app/extract.py
new file mode 100644
index 00000000..b6d366cb
--- /dev/null
+++ b/constrain/app/extract.py
@@ -0,0 +1,181 @@
+import ast
+from pathlib import Path
+import re
+import json
+
+
+def extract_method_info(node):
+    """Retrieves method info from node
+
+    Args:
+        node (ast.FunctionDef): method to use
+
+    Returns:
+        dict: dictionary containing keys "name" and "description"
+    """
+    if not isinstance(node, ast.FunctionDef):
+        return None
+
+    method_info = {
+        "name": node.name,
+        "description": ast.get_docstring(node),
+    }
+    return method_info
+
+
+def extract_class_info(node):
+    """Retrieves class info from node
+
+    Args:
+        node (ast.ClassDef): class to use
+
+    Returns:
+        dict: dictionary with the class name and a list of method info dictionaries for its public methods
+    """
+    if not isinstance(node, ast.ClassDef):
+        return None
+
+    class_info = {"class": node.name, "methods": []}
+
+    for item in node.body:
+        if isinstance(item, ast.FunctionDef) and (
+            not item.name.startswith("_") or item.name == "__init__"
+        ):
+            method_info = extract_method_info(item)
+            if method_info:
+                class_info["methods"].append(method_info)
+    return class_info
+
+
+def parse_python_file(file_path):
+    """Given a file path, reads contents to find each class in file
+
+    Args:
+        file_path (str): file path to read
+
+    Returns:
+        list: list of class info dictionaries
+    """
+    with open(file_path, "r") as file:
+        content = file.read()
+
+    tree = ast.parse(content)
+    classes = []
+    for item in tree.body:
+        if isinstance(item, ast.ClassDef):
+            class_info = extract_class_info(item)
+            if class_info:
+                classes.append(class_info)
+
+    return classes
+
+
+def format_method_name(method_name: str):
+    """Formats an API-formatted method name into a method name to be displayed in the GUI
+
+    Args:
+        method_name (str): method name to format
+
+    Returns:
+        str: formatted method name
+    """
+    if method_name == "__init__":
+        return "Initialize"
+
+    method_name = method_name.replace("_", " ")
+    method_name = method_name.title()
+    method_name = method_name.replace("Json", "JSON")
+    method_name = method_name.replace("Ids", "IDs")
+    uncap = ["To", "By", "From"]
+
+    for w in uncap:
+        method_name = method_name.replace(w, w.lower())
+
+    return method_name
+
+
+def place_args(arg_descriptions):
+    """Places the argument names, their types, and their descriptions inside a dictionary
+
+    Note: appends to the module-level `all_dict` using the `class_name` and
+    `method_name` globals set in the `__main__` block below.
+
+    Args:
+        arg_descriptions (str): the `Args:` block of a method docstring
+    """
+    pattern = r"^(.*?)\((.*?)\): (.*?)$"
+    arg_descriptions = arg_descriptions.split("\n    ")
+    for arg in arg_descriptions:
+        match = re.search(pattern, arg, re.MULTILINE)
+        if match:
+            name = match.group(1).strip()
+            types = match.group(2).strip()
+            des = match.group(3).strip()
+            if "optional" in types:
+                name += " - Optional"
+                types = types.split(", optional")[0]
+            if types == "bool":
+                types = "combo_box"
+            else:
+                types = "line_edit"
+
+            all_dict[class_name][method_name].append(
+                {"label": format_method_name(name), "type": types, "description": des}
+            )
+        else:
+            continue
+
+
+# Parses, extracts, puts extractions into a dictionary, converts to .json
+if __name__ == "__main__":
+    base_path = Path(__file__).parent.parent / "api"
+
+    python_file_paths = [
+        base_path / "data_processing.py",
+        base_path / "reporting.py",
+        base_path / "verification_case.py",
+        base_path / "verification_library.py",
+        base_path / "verification.py",
+    ]
+
+    # convert paths to strings for parse_python_file
+    python_file_paths = [str(path) for path in python_file_paths]
+
+    all_dict = {}
+    for p in python_file_paths:
+        extracted_classes = parse_python_file(p)
+        for class_info in extracted_classes:
+            class_name = class_info["class"]
+            all_dict[class_name] = {}
+            for method in class_info["methods"]:
+                method_name = method["name"]
+                method_name = format_method_name(method_name)
+                all_dict[class_name][method_name] = []
+
+                try:
+                    description, arg_descriptions = method["description"].split(
+                        "\n\nArgs:\n    "
+                    )
+                    try:
+                        (
+                            arg_descriptions,
+                            return_descriptions,
+                        ) = arg_descriptions.split("\n\n", 1)
+                    except ValueError:
+                        return_descriptions = ""
+                except ValueError:
+                    try:
+                        description, return_descriptions = method["description"].split(
+                            "\n\nReturns:\n"
+                        )
+                        arg_descriptions = ""
+                    except ValueError:
+                        description, return_descriptions, arg_descriptions = (
+                            method["description"],
+                            "",
+                            "",
+                        )
+                if arg_descriptions:
+                    place_args(arg_descriptions)
+    with open("constrain/app/schema.json", "w") as json_file:
+        json.dump(all_dict, json_file, indent=4)
diff --git a/constrain/app/list_and_choice_popups.py b/constrain/app/list_and_choice_popups.py
index 742d4856..f8fa3a33 100644
--- a/constrain/app/list_and_choice_popups.py
+++ b/constrain/app/list_and_choice_popups.py
@@ -19,7 +19,7 @@

 # mapping from object to its methods and its methods to its parameters for display in popup
 script_directory = os.path.dirname(os.path.abspath(__file__))
-dependencies_path = os.path.join(script_directory, "dependencies.json")
+dependencies_path = os.path.join(script_directory, "schema.json")
 api_to_method_path = os.path.join(script_directory, "api_to_method.json")

 with open(dependencies_path) as f:
diff --git a/constrain/app/popup_window.py b/constrain/app/popup_window.py
index a776ee0e..9590b1f2 100644
--- a/constrain/app/popup_window.py
+++ b/constrain/app/popup_window.py
@@ -24,7 +24,7 @@
 from constrain.app.list_and_choice_popups import ListPopup, ChoicesPopup

 script_directory = os.path.dirname(os.path.abspath(__file__))
-dependencies_path = os.path.join(script_directory, "dependencies.json")
+dependencies_path = os.path.join(script_directory, "schema.json")
 api_to_method_path = os.path.join(script_directory, "api_to_method.json")

 # mapping from object to its methods and its methods to its parameters for display in popup
@@ -473,7 +473,7 @@
         self.method_combo_box.addItems(methods)
         self.method_combo_box.show()

-    def make_and_add_groupbox(self, title, widget):
+    def make_and_add_groupbox(self, title, widget, description=None):
         """Creates and adds QGroupBox to layout with given title and widget

         Args:
@@ -487,11 +487,9 @@
         gb.setTitle(title)
         layout = QVBoxLayout()

-        # tt_label = QLabel()
-        # pixmap = QPixmap("tt.png")
-        # tt_label.setPixmap(pixmap)
+        if description:
+            gb.setToolTip(description)

-        # layout.addWidget(tt_label)
         layout.addWidget(widget)
         gb.setLayout(layout)
         self.form_layout.addWidget(gb)
@@ -554,11 +552,16 @@
         # create groupboxes for
each necessary field for the method for field in fields: if field["type"] == "line_edit": - self.make_and_add_groupbox(field["label"], QLineEdit()) + self.make_and_add_groupbox( + field["label"], QLineEdit(), field["description"] + ) elif field["type"] == "combo_box": combo_box = QComboBox() combo_box.addItems(["", "True", "False"]) - self.make_and_add_groupbox(field["label"], combo_box) + combo_box.setCurrentText("False") + self.make_and_add_groupbox( + field["label"], combo_box, field["description"] + ) payload_widget = QGroupBox() payload_widget.setTitle("Payloads") diff --git a/constrain/app/schema.json b/constrain/app/schema.json new file mode 100644 index 00000000..77ca2072 --- /dev/null +++ b/constrain/app/schema.json @@ -0,0 +1,402 @@ +{ + "DataProcessing": { + "Initialize": [ + { + "label": "Data - Optional", + "type": "line_edit", + "description": "Path to the data (CSV format) to be loaded for processing." + }, + { + "label": "Data Source - Optional", + "type": "line_edit", + "description": "Data source name. Use `EnergyPlus` or `Other`." + }, + { + "label": "Timestamp Column Name - Optional", + "type": "line_edit", + "description": "Name of the column header that contains the time series timestamps." + } + ], + "Slice": [ + { + "label": "Start Time", + "type": "line_edit", + "description": "Python datetime object used as the slice start date of the data." + }, + { + "label": "End Time", + "type": "line_edit", + "description": "Python datetime object used as the slice end date of the data." + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ], + "Add Parameter": [ + { + "label": "Name", + "type": "line_edit", + "description": "Name of the parameter" + }, + { + "label": "Value", + "type": "line_edit", + "description": "Value of the parameter." + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ], + "Apply Function": [ + { + "label": "Variable Names", + "type": "line_edit", + "description": "List of variables used as input to the function. All elements in variable_names need to be in self.data.columns" + }, + { + "label": "New Variable Name", + "type": "line_edit", + "description": "Name of the new variable containing the result of the function for each time stamp." + }, + { + "label": "Function to Apply", + "type": "line_edit", + "description": "Name of the function to apply. Choices are: `sum`, `min`, `max`or `average` (or 'mean')." + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ], + "Summary": [], + "Concatenate": [ + { + "label": "Datasets", + "type": "line_edit", + "description": "List of datasets (pd.DataFrame) to concatenate with `data`." + }, + { + "label": "Axis", + "type": "line_edit", + "description": "1 or 0. 1 performs a vertical concatenation and 0 performs a horizontal concatenation." + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ], + "Check": [], + "Fill Missing Values": [ + { + "label": "Method", + "type": "line_edit", + "description": "Method to use to fill the missing values: 'linear' (treat values as equally spaced) or 'pad' (use existing values)." + }, + { + "label": "Variable Names - Optional", + "type": "line_edit", + "description": "List of variable names that need missing values to be filled. 
By default, fill all missing data in self.data" + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ], + "Plot": [ + { + "label": "Variable Names", + "type": "line_edit", + "description": "List of variables to plot. The variables must be in the data." + }, + { + "label": "Kind", + "type": "line_edit", + "description": "Type of chart to plot, either'timeseries', or 'scatter'." + } + ], + "Downsample": [ + { + "label": "Frequency Type", + "type": "line_edit", + "description": "Downsampling frequency. Either 'day', 'hour', 'minute', or 'second'." + }, + { + "label": "Number Of Periods", + "type": "line_edit", + "description": "Number of frequency used for downsampling. For instance, use 1 and a frequency_type of 'hour' to downsample the data to every hour." + }, + { + "label": "Sampling Function - Optional", + "type": "line_edit", + "description": "Function to apply during downsampling, either 'mean' or 'sum' or a dictionary of key value pairs where the keys correspond to all the variables in data and value are either 'mean' or sum'. By default, using mean to downsample." + }, + { + "label": "Inplace - Optional", + "type": "combo_box", + "description": "Modify the dataset directly. Defaults to False." + } + ] + }, + "Reporting": { + "Initialize": [], + "Report Multiple Cases": [ + { + "label": "Item Names", + "type": "line_edit", + "description": "List of unique verification item names. If the `item_names` argument is empty, all the verification results in the `verification_json` argument are reported." + } + ] + }, + "VerificationCase": { + "Initialize": [ + { + "label": "Cases - Optional", + "type": "line_edit", + "description": "A list of Dict. dictionary that includes verification case(s)." + }, + { + "label": "JSON Case Path - Optional", + "type": "line_edit", + "description": "Path to the verification case file. If the path ends with `*.json`, then the items in the JSON file are loaded. If the path points to a directory, then verification cases JSON files are loaded." + } + ], + "Load Verification Cases from JSON": [ + { + "label": "JSON Case Path", + "type": "line_edit", + "description": "path to the json file containing fully defined verification cases." + } + ], + "Save Case Suite to JSON": [ + { + "label": "JSON Path", + "type": "line_edit", + "description": "Path to the json file to save the cases." + }, + { + "label": "Case IDs - Optional", + "type": "line_edit", + "description": "Unique ids of verification cases to save. By default, save all cases in `self.case_suite`. Default to an empty list." + } + ], + "Create Verification Case Suite from Base Case": [ + { + "label": "Base Case", + "type": "line_edit", + "description": "base verification input information." + }, + { + "label": "Update Key Value", + "type": "line_edit", + "description": "the same format as the `base_case` arg, but the updating fields consist of a list of values to be populated with." + }, + { + "label": "Keep Base Case - Optional", + "type": "combo_box", + "description": "whether to keep the base case in returned list of verification cases. Default to False." + } + ], + "Validate Verification Case Structure": [ + { + "label": "Case", + "type": "line_edit", + "description": "case information that will be validated." + }, + { + "label": "Verbose", + "type": "combo_box", + "description": "whether to output verbose information. Default to False." 
+            }
+        ],
+        "Validate": [],
+        "Save Verification Cases to JSON": [
+            {
+                "label": "JSON Path",
+                "type": "line_edit",
+                "description": "json file path to save the cases."
+            },
+            {
+                "label": "Cases",
+                "type": "line_edit",
+                "description": "List of complete verification cases Dictionary to save."
+            }
+        ],
+        "Read Case": [
+            {
+                "label": "File Name",
+                "type": "line_edit",
+                "description": "file name to read from"
+            }
+        ],
+        "Same Case": [
+            {
+                "label": "Case A",
+                "type": "line_edit",
+                "description": "First case in check"
+            },
+            {
+                "label": "Case B",
+                "type": "line_edit",
+                "description": "Second case in check"
+            },
+            {
+                "label": "Ignored Keys - Optional",
+                "type": "line_edit",
+                "description": "keys to ignore, defaults to [\"case_id_in_suite\"]"
+            }
+        ],
+        "Case Already In Suite": [
+            {
+                "label": "Case",
+                "type": "line_edit",
+                "description": "case in question"
+            },
+            {
+                "label": "Ignored Keys - Optional",
+                "type": "line_edit",
+                "description": "keys to ignore, defaults to [\"case_id_in_suite\"]"
+            }
+        ],
+        "Check JSON Path Type": [
+            {
+                "label": "JSON Path",
+                "type": "line_edit",
+                "description": "path to check"
+            }
+        ],
+        "Check Type": [
+            {
+                "label": "Var Name",
+                "type": "line_edit",
+                "description": "name of variable"
+            },
+            {
+                "label": "Var Value",
+                "type": "line_edit",
+                "description": "value of variable"
+            },
+            {
+                "label": "Var Type",
+                "type": "line_edit",
+                "description": "type of variable"
+            }
+        ],
+        "Check File": [
+            {
+                "label": "File Path Name",
+                "type": "line_edit",
+                "description": "name of file path"
+            },
+            {
+                "label": "File Path",
+                "type": "line_edit",
+                "description": "file path"
+            }
+        ]
+    },
+    "VerificationLibrary": {
+        "Initialize": [
+            {
+                "label": "Lib Path - Optional",
+                "type": "line_edit",
+                "description": "path to the verification library file or folder. If the path ends with `*.json`, then library items defined in the json file are loaded. If the path points to a directory, then library items in all jsons in this directory and its subdirectories are loaded. Library items need to have unique names defined in the json files and python files. Defaults to None."
+            }
+        ],
+        "Get Library Item": [
+            {
+                "label": "Item Name",
+                "type": "line_edit",
+                "description": "Verification item name to get."
+            }
+        ],
+        "Validate Library": [
+            {
+                "label": "Items",
+                "type": "line_edit",
+                "description": "Library items to validate. `items` must be filled with valid verification item(s). If not, an error occurs. Defaults to empty list."
+            }
+        ],
+        "Get Library Items": [
+            {
+                "label": "Items - Optional",
+                "type": "line_edit",
+                "description": "Library items to get. By default, get all library items loaded at instantiation. Defaults to empty list."
+            }
+        ],
+        "Get Applicable Library Items by Datapoints": [
+            {
+                "label": "Datapoints",
+                "type": "line_edit",
+                "description": "Datapoint names."
+            }
+        ],
+        "Get Required Datapoints by Library Items": [
+            {
+                "label": "Items - Optional",
+                "type": "line_edit",
+                "description": "Library items to summarize datapoints from. By default, summarize all library items loaded at instantiation. Defaults to empty list."
+            }
+        ]
+    },
+    "Verification": {
+        "Initialize": [
+            {
+                "label": "Verifications - Optional",
+                "type": "line_edit",
+                "description": "VerificationCase object containing the verification case(s) to run. Defaults to None."
+            }
+        ],
+        "Configure": [
+            {
+                "label": "Output Path",
+                "type": "line_edit",
+                "description": "Verification results output path."
+ }, + { + "label": "Lib Items Path - Optional", + "type": "line_edit", + "description": "User provided verification item json path (include name of the file with extension)." + }, + { + "label": "Lib Classes Py File - Optional", + "type": "line_edit", + "description": "User provided verification item python classes file." + }, + { + "label": "Plot Option - Optional", + "type": "line_edit", + "description": "Type of plots to include. It should either be all-compact, all-expand, day-compact, or day-expand. It can also be None, which will plot all types. Default to None." + }, + { + "label": "Fig Size - Optional", + "type": "line_edit", + "description": "Tuple of integers (length, height) describing the size of the figure to plot. Defaults to (6.4, 4.8)." + }, + { + "label": "Num Threads - Optional", + "type": "line_edit", + "description": "Number of threads to run verifications in parallel. Defaults to 1." + }, + { + "label": "Preprocessed Data - Optional", + "type": "line_edit", + "description": "Pre-processed data stored in the data frame. Default to None." + } + ], + "Run Single Verification": [ + { + "label": "Case", + "type": "line_edit", + "description": "Verification case dictionary." + } + ], + "Run": [] + } +} \ No newline at end of file
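---

Note on the generated schema: extract.py writes schema.json using a relative path, so it should be run from the repository root. Below is a minimal sketch of the round trip from API docstrings to the popup form fields; the loop at the end inspects one entry defined in the schema above (the printing loop itself is illustrative, not part of this patch):

    # Regenerate constrain/app/schema.json from the API docstrings, then print
    # the fields the popup windows will render for Verification -> Initialize.
    import json
    import subprocess

    # runs extract.py's __main__ block, which dumps all_dict to schema.json
    subprocess.run(["python", "constrain/app/extract.py"], check=True)

    with open("constrain/app/schema.json") as f:
        schema = json.load(f)

    # each field dict carries the label, widget type ("line_edit" or
    # "combo_box"), and tooltip description consumed by
    # PopupWindow.update_form / make_and_add_groupbox
    for field in schema["Verification"]["Initialize"]:
        print(field["label"], "|", field["type"], "|", field["description"])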