diff --git a/CHANGES.txt b/CHANGES.txt index 5007dff15..82b0cc5af 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,21 @@ +V3.9.0 + - Project can have a comment (tooltip over the project name and click will edit it). Will be populated with templates description. + - Project Color modes: + size color mode removed: Problematic in slow setups or huge projects. + labels color mode merged with status color mode. + - Manual subsets work when SCIPION_USE_QUEUE is active. For now this execution is not sent to the QUEUE: Might need to ask for the queue parameters. + - Step details show if the step needs a GPU + - Folder size not showing in protocol's tooltip: poor performance in some setups + - Threads field and help improved: Distinguish between "Scipion threads" and "Threads" (program threads) + - create script accepts a comment (location optional as -) + - GPU "queue": + Basic GPU queue implemented for a more robust GPU specification in combination with threads. See threadmpigpu help page in the doc. Linked in the help. + Tests added + - Tolerate non-existing SCIPION_SPRITES_FILE falling back to default value. + - Scroll and zoom gestures swap to more standard usage: Control + scroll = zoom, Shift + Scroll = Horizontal scroll. + +developers: + - New constants for "id" and "_objId": ID_COLUMN='id', ID_ATTRIBUTE='_objId' V3.8.0 users: - Inform when a plugin does not import properly diff --git a/pyworkflow/config.py b/pyworkflow/config.py index a93db7b60..cc017884a 100644 --- a/pyworkflow/config.py +++ b/pyworkflow/config.py @@ -198,6 +198,7 @@ def join(self, *path): # Internal cached variables, use __ so they are not returned in getVars __activeColor = None + __defaultSpritesFile = _join(getResourcesPath(),'sprites.png') CONDA_ACTIVATION_CMD = _get(CONDA_ACTIVATION_CMD_VAR,'', "str: Command to activate/initialize conda itself. Do not confuse it with 'conda activate'. It should be defined at installation time. 
It looks like this: eval \"$(/extra/miniconda3/bin/conda shell.bash hook)\"") @@ -299,7 +300,7 @@ def join(self, *path): SCIPION_CONTRAST_COLOR = _get('SCIPION_CONTRAST_COLOR', 'cyan', "Color used to highlight features over grayscaled images.", caster=validColor) - SCIPION_SPRITES_FILE = _get('SCIPION_SPRITES_FILE', _join(getResourcesPath(),'sprites.png'), + SCIPION_SPRITES_FILE = _get('SCIPION_SPRITES_FILE', __defaultSpritesFile, "File (png) with the icons in a collage. Default is found at pyworkflow/resources/sprites.png. And a GIMP file could be found at the same folder in the github repo.") SCIPION_SHOW_TEXT_IN_TOOLBAR = _get('SCIPION_SHOW_TEXT_IN_TOOLBAR', TRUE_STR, @@ -544,7 +545,12 @@ def isCondaInstallation(cls): condaExe = os.path.join(envFolder, "bin", "python") return condaExe == getPython() - + @classmethod + def getSpritesFile(cls): + if not os.path.exists(Config.SCIPION_SPRITES_FILE): + return cls.__defaultSpritesFile + else: + return Config.SCIPION_SPRITES_FILE # Add bindings folder to sys.path diff --git a/pyworkflow/constants.py b/pyworkflow/constants.py index e3130aede..76b094806 100644 --- a/pyworkflow/constants.py +++ b/pyworkflow/constants.py @@ -43,7 +43,7 @@ VERSION_1_1 = '1.1.0' VERSION_1_2 = '1.2.0' VERSION_2_0 = '2.0.0' -VERSION_3_0 = '3.8.0' +VERSION_3_0 = '3.9.0' # For a new release, define a new constant and assign it to LAST_VERSION # The existing one has to be added to OLD_VERSIONS list. 
@@ -192,6 +192,7 @@ class DOCSITEURLS: WAIT_FOR = GUI + '#waiting-for-other-protocols' PLUGIN_MANAGER = USER + 'plugin-manager.html' HOST_CONFIG = DOCS + "scipion-modes/host-configuration.html" + THREADS_MPIS_AND_GPUS = USER + 'threads-mpi-gpus.html' # tkinter bind constants @@ -211,3 +212,7 @@ class TK: DEFAULT_EXECUTION_ACTION_SINGLE = 2 DEFAULT_EXECUTION_ACTION_ALL = 3 +# Id field/attribute constants +ID_COLUMN='id' +ID_ATTRIBUTE='_objId' + diff --git a/pyworkflow/gui/canvas.py b/pyworkflow/gui/canvas.py index c6d48ecf5..09ee8e28e 100644 --- a/pyworkflow/gui/canvas.py +++ b/pyworkflow/gui/canvas.py @@ -82,8 +82,8 @@ def __init__(self, parent, tooltipCallback=None, tooltipDelay=1500, **kwargs): self.bind("", self.onControlClick) # self.bind("", self.onScroll) # Scroll bindings in Linux - self.bind("", self.zoomerP) - self.bind("", self.zoomerM) + self.bind("", self.zoomerP) + self.bind("", self.zoomerM) self._tooltipId = None self._tooltipOn = False # True if the tooltip is displayed @@ -475,7 +475,7 @@ def _drawNodes(self, node, visitedDict={}): self.addItem(item) if getattr(node, 'expanded', True): - for child in node.getChilds(): + for child in node.getChildren(): if self.nodeList is None: self._drawNodes(child, visitedDict) elif self.nodeList.getNode(child.run.getObjId()).isVisible(): @@ -512,7 +512,7 @@ def _visibleParents(self, node, parentlist): """ Return a list with the visible parents of the node's children """ - for child in node.getChilds(): + for child in node.getChildren(): parents = child.getParents() for parent in parents: parentNode = self.nodeList.getNode(parent.run.getObjId()) @@ -525,7 +525,7 @@ def _setupParentProperties(self, node, visitedDict): the properties (width, height, x and y) is propagated to the hidden childs. 
""" - for child in node.getChilds(): + for child in node.getChildren(): if child.getName() not in visitedDict: child.width = node.width child.height = node.height @@ -544,7 +544,7 @@ def _updatePositions(self, node, visitedDict=None, createEdges=True): item.moveTo(node.x, node.y) if getattr(node, 'expanded', True): - for child in node.getChilds(): + for child in node.getChildren(): if self.nodeList is None: self.createEdge(item, child.item) self._updatePositions(child, visitedDict, createEdges) diff --git a/pyworkflow/gui/dialog.py b/pyworkflow/gui/dialog.py index 103bed164..32c42460c 100644 --- a/pyworkflow/gui/dialog.py +++ b/pyworkflow/gui/dialog.py @@ -35,7 +35,7 @@ from pyworkflow import Config from pyworkflow.exceptions import PyworkflowException from pyworkflow.utils import Message, Icon, Color -from . import gui, Window, widgets, configureWeigths, LIST_TREEVIEW, defineStyle, ToolTip +from . import gui, Window, widgets, configureWeigths, LIST_TREEVIEW, defineStyle, ToolTip, getDefaultFont from .tree import BoundTree, Tree from .text import Text, TaggedText @@ -443,7 +443,8 @@ def body(self, bodyFrame): label = tk.Label(bodyFrame, text=self.entryLabel, bg=Config.SCIPION_BG_COLOR, bd=0) label.grid(row=row, column=0, sticky='nw', padx=(15, 10), pady=15) self.entry = tk.Entry(bodyFrame, bg=gui.cfgEntryBgColor, - width=self.entryWidth, textvariable=self.tkvalue) + width=self.entryWidth, textvariable=self.tkvalue, + font=getDefaultFont()) self.entry.grid(row=row, column=1, sticky='new', padx=(0, 15), pady=15) self.initial_focus = self.entry diff --git a/pyworkflow/gui/form.py b/pyworkflow/gui/form.py index 3176a5d37..f3eb3230a 100644 --- a/pyworkflow/gui/form.py +++ b/pyworkflow/gui/form.py @@ -1698,14 +1698,19 @@ def _createParallel(self, runFrame, r): c2 = 0 sticky = 'e' + helpMessage = pwutils.Message.HELP_PARALLEL_HEADER + if mode == pwprot.STEPS_PARALLEL: if allowThreads and numberOfThreads > 0: prot.numberOfMpi.set(1) - self._createHeaderLabel(procFrame, 
pwutils.Message.LABEL_THREADS, + self._createHeaderLabel(procFrame, pwutils.Message.LABEL_SCIPION_THREADS, sticky=sticky, row=r2, column=c2, pady=0) entry = self._createBoundEntry(procFrame, pwutils.Message.VAR_THREADS) + + helpMessage += pwutils.Message.HELP_SCIPION_THREADS + entry.grid(row=r2, column=c2 + 1, padx=(0, 5), sticky='w') elif allowMpi and numberOfMpi > 1: self.showError("MPI parameter is deprecated for protocols " @@ -1728,6 +1733,8 @@ def _createParallel(self, runFrame, r): # Modify values to be used in MPI entry c2 += 2 sticky = 'w' + + helpMessage += pwutils.Message.HELP_PARALLEL_THREADS # ---- MPI ---- if allowMpi: self._createHeaderLabel(procFrame, pwutils.Message.LABEL_MPI, @@ -1736,11 +1743,14 @@ def _createParallel(self, runFrame, r): entry = self._createBoundEntry(procFrame, pwutils.Message.VAR_MPI) entry.grid(row=r2, column=c2 + 1, padx=(0, 5), sticky='w') + helpMessage += pwutils.Message.HELP_PARALLEL_MPI + + btnHelp = IconButton(procFrame, pwutils.Message.TITLE_COMMENT, pwutils.Icon.ACTION_HELP, highlightthickness=0, command=self._createHelpCommand( - pwutils.Message.HELP_MPI_THREADS)) + helpMessage)) btnHelp.grid(row=0, column=4, padx=(5, 0), pady=2, sticky='e') procFrame.columnconfigure(0, minsize=60) diff --git a/pyworkflow/gui/graph.py b/pyworkflow/gui/graph.py index 6277f1e18..69dede6ab 100644 --- a/pyworkflow/gui/graph.py +++ b/pyworkflow/gui/graph.py @@ -85,7 +85,7 @@ def _setLevel(self, node, level, parent): nextLevel = level + 1 if nextLevel > self.maxLevel: return - for child in node.getChilds(): + for child in node.getChildren(): if nextLevel > getattr(child, 'level', 0): self._setLevel(child, nextLevel, node) @@ -97,7 +97,7 @@ def _paintNodeWithChilds(self, node, level): if level > self.maxLevel: return - childs = [c for c in node.getChilds() if c.parent is node] + childs = [c for c in node.getChildren() if c.parent is node] n = len(childs) if n > 0: @@ -174,7 +174,7 @@ def _getHLimits(self, node): for each level of the tree 
""" node.hLimits = [[-node.half, node.half]] - childs = [c for c in node.getChilds() if c.parent is node] + childs = [c for c in node.getChildren() if c.parent is node] for child in childs: count = 1 if not hasattr(child, 'hLimits'): @@ -221,7 +221,7 @@ def _createEdges(self, node, x): if node.level == self.maxLevel: return - for c in node.getChilds(): + for c in node.getChildren(): if c.parent is node: self._createEdges(c, nx) self.createEdge(node.item, c.item) @@ -230,7 +230,7 @@ def _paintNodeWithPosition(self, node): """ Paint nodes using its position. """ self._paintNode(node, None) - for child in node.getChilds(): + for child in node.getChildren(): # parent = None for nodes that have been not traversed parent = getattr(child, 'parent', None) if parent is None: @@ -241,7 +241,7 @@ def _paintEdges(self, node): """ Paint only the edges between nodes, assuming they are already well positioned. """ - for child in node.getChilds(): + for child in node.getChildren(): if child.parent is node: self._paintEdges(child) self.createEdge(node.item, child.item) diff --git a/pyworkflow/gui/graph_layout.py b/pyworkflow/gui/graph_layout.py index e753f54fb..fdc1663dd 100644 --- a/pyworkflow/gui/graph_layout.py +++ b/pyworkflow/gui/graph_layout.py @@ -132,7 +132,7 @@ def _setLayoutLevel(self, node, level, parent, ancestors=[]): if self.__isNodeExpanded(node): ancestors.append(node.getName()) - for child in node.getChilds(): + for child in node.getChildren(): if child.getName() in ancestors: logger.warning("WARNING: There might be a cyclic redundancy error in this protocol: %s (%s)" %(child.getLabel(), child.getName())) @@ -155,7 +155,7 @@ def __getNodeChilds(self, node): visited by this node first (its 'parent') """ if self.__isNodeExpanded(node): - return [c for c in node.getChilds() if c._layout['parent'] is node] + return [c for c in node.getChildren() if c._layout['parent'] is node] else: return [] # treat collapsed nodes as if they have no childs diff --git 
a/pyworkflow/gui/project/base.py b/pyworkflow/gui/project/base.py index a05f90891..36459d46b 100644 --- a/pyworkflow/gui/project/base.py +++ b/pyworkflow/gui/project/base.py @@ -28,7 +28,7 @@ import tkinter as tk import pyworkflow as pw -from pyworkflow.gui import Window, Message, Color, getBigFont, defineStyle +from pyworkflow.gui import Window, Message, Color, getBigFont, defineStyle, ToolTip from pyworkflow.gui.widgets import GradientFrame from pyworkflow.utils.properties import Icon from pyworkflow.gui.project.variables import VariablesDialog @@ -98,18 +98,18 @@ def createHeaderFrame(self, parent): bg=pw.Config.SCIPION_BG_COLOR) versionLabel.grid(row=0, column=1, sticky='sw', pady=20) - # Create the Project Name label - projName = getattr(self, 'projName', '') - projLabel = tk.Label(header, text=projName, font=getBigFont(), - borderwidth=0, anchor='nw', bg=pw.Config.SCIPION_BG_COLOR, - fg=Color.ALT_COLOR_DARK) - projLabel.grid(row=0, column=2, sticky='sw', padx=(20, 5), pady=10) + self.customizeheader(header) + # Create gradient GradientFrame(header, height=8, borderwidth=0).grid(row=1, column=0, columnspan=3, sticky='new') return header + def customizeheader(self, headerFrame): + """ Implement in children classes to customize it: e.g.: Project name in project window""" + pass + def addViewList(self, header): """Create the view selection frame (Protocols|Data) in the header. """ diff --git a/pyworkflow/gui/project/project.py b/pyworkflow/gui/project/project.py index 622c5df16..bb0e023ec 100644 --- a/pyworkflow/gui/project/project.py +++ b/pyworkflow/gui/project/project.py @@ -31,6 +31,10 @@ """ import logging +from tkinter import Label + +from .. 
import askString + logger = logging.getLogger(__name__) import os @@ -43,7 +47,7 @@ import pyworkflow.utils as pwutils from pyworkflow.gui.project.utils import OS from pyworkflow.project import MenuConfig -from pyworkflow.gui import Message, Icon +from pyworkflow.gui import Message, Icon, getBigFont, ToolTip from pyworkflow.gui.browser import FileBrowserWindow from pyworkflow.gui.plotter import Plotter @@ -61,7 +65,10 @@ class ProjectWindow(ProjectBaseWindow): def __init__(self, path, master=None): # Load global configuration - self.projName = os.path.basename(path) + self.projPath = path + self.project = self.loadProject() + self.projName = self.project.getShortName() + try: projTitle = '%s (%s on %s)' % (self.projName, pwutils.getLocalUserName(), @@ -69,8 +76,6 @@ def __init__(self, path, master=None): except Exception: projTitle = self.projName - self.projPath = path - self.project = self.loadProject() # TODO: put the menu part more nicely. From here: menu = MenuConfig() @@ -119,6 +124,8 @@ def __init__(self, path, master=None): # Notify about the workflow in this project self.selectedProtocol = None self.showGraph = False + self.commentTT = None # Tooltip to store the project description + Plotter.setBackend('TkAgg') ProjectBaseWindow.__init__(self, projTitle, master, minsize=(90, 50), icon=Icon.SCIPION_ICON_PROJ, _class=self.projName) @@ -131,12 +138,30 @@ def __init__(self, path, master=None): ProjectWorkflowNotifier(self.project).notifyWorkflow() + def createHeaderFrame(self, parent): """Create the header and add the view selection frame at the right.""" header = ProjectBaseWindow.createHeaderFrame(self, parent) self.addViewList(header) return header + def customizeheader(self, headerFrame): + """ Adds the Project name in the header frame""" + # Create the Project Name label + + projLabel = Label(headerFrame, text=self.projName, font=getBigFont(), + borderwidth=0, anchor='nw', bg=pw.Config.SCIPION_BG_COLOR, + fg=pw.Color.ALT_COLOR_DARK) + projLabel.bind("", 
self.setComment) + projLabel.grid(row=0, column=2, sticky='sw', padx=(20, 5), pady=10) + + self.commentTT = ToolTip(projLabel, self.project.getComment(), 200) + def setComment(self, e): + + newComment = askString("Change project description", "Description", self.root, entryWidth=100, defaultValue=self.project.getComment()) + self.commentTT.configure(text=newComment) + self.project.setComment(newComment) + self.project._storeCreationTime() # Comment is stored as creation time comment for now def getSettings(self): return self.settings @@ -334,30 +359,6 @@ def myfunc(): except Exception as ex: logger.error("There was an error executing object command !!!:", exc_info=ex) - - def recalculateCTF(self, inputObjId, sqliteFile): - """ Load the project and launch the protocol to - create the subset. - """ - # Retrieve project, input protocol and object from db - project = self.project - inputObj = project.mapper.selectById(int(inputObjId)) - parentProtId = inputObj.getObjParentId() - parentProt = project.mapper.selectById(parentProtId) - protDep = project._getProtocolsDependencies([parentProt]) - if protDep: - prot = project.copyProtocol(parentProt) - prot.continueRun.set(parentProt) - else: - prot = parentProt - prot.isFirstTime.set(True) - - # Define the input params of the new protocol - prot.recalculate.set(True) - prot.sqliteFile.set(sqliteFile) - # Launch the protocol - self.getViewWidget().executeProtocol(prot) - class ProjectManagerWindow(ProjectBaseWindow): """ Windows to manage all projects. """ @@ -466,6 +467,13 @@ def handle(self): attr.set(obj) elif value: attr.set(value) + + if protocol.useQueue(): + # Do not use the queue in this case otherwise we need to ask for queue parameters. + # Maybe something to do in the future. But now this logic is in form.py. + logger.warning('Cancelling launching protocol "%s" to the queue.' 
% protocol) + protocol._useQueue.set(False) + # project.launchProtocol(protocol) # We need to enqueue the action of execute a new protocol # to be run in the same GUI thread and avoid concurrent diff --git a/pyworkflow/gui/project/steps.py b/pyworkflow/gui/project/steps.py index ecf0e5038..848e54408 100644 --- a/pyworkflow/gui/project/steps.py +++ b/pyworkflow/gui/project/steps.py @@ -30,7 +30,6 @@ import pyworkflow.gui as pwgui import pyworkflow.utils as pwutils - from pyworkflow import TK from pyworkflow.utils import Icon @@ -59,11 +58,17 @@ def getObjectInfo(obj): return info @staticmethod - def getObjectPreview(obj): + def getObjectPreview(obj: pwprot.Step): args = json.loads(obj.argsStr.get()) msg = "*Prerequisites*: %s \n" % str(obj._prerequisites) - msg += "*Arguments*: " + '\n '.join([str(a) for a in args]) + + msg += ("*Arguments*:\n") + for arg in args: + msg += " %s\n" % arg + + msg += "*Needs GPU*: %s" % obj.needsGPU() + if hasattr(obj, 'resultFiles'): results = json.loads(obj.resultFiles.get()) if len(results): diff --git a/pyworkflow/gui/project/viewdata.py b/pyworkflow/gui/project/viewdata.py index 3c19fe766..2c32d436f 100644 --- a/pyworkflow/gui/project/viewdata.py +++ b/pyworkflow/gui/project/viewdata.py @@ -55,7 +55,7 @@ def populateTree(tree, elements, parentId=''): t += ' (%d)' % node.count node.nodeId = tree.insert(parentId, 'end', node.getName(), text=t, tags=DATA_TAG) - populateTree(tree, node.getChilds(), node.nodeId) + populateTree(tree, node.getChildren(), node.nodeId) if node.count: tree.see(node.nodeId) tree.item(node.nodeId, tags=('non-empty', DATA_TAG)) diff --git a/pyworkflow/gui/project/viewprotocols.py b/pyworkflow/gui/project/viewprotocols.py index aac91df56..9e50334d3 100644 --- a/pyworkflow/gui/project/viewprotocols.py +++ b/pyworkflow/gui/project/viewprotocols.py @@ -808,7 +808,7 @@ def createRunItem(self, canvas, node): item.margin = 0 # Paint the oval..if apply. 
- self._paintOval(item, statusColor) + #self._paintOval(item, statusColor) # Paint the bottom line (for now only labels are painted there). self._paintBottomLine(item) @@ -821,7 +821,7 @@ def _getBoxColor(self, nodeInfo, statusColor, node): try: # If the color has to go to the box - if self.settings.statusColorMode(): + if self.settings.statusColorMode() or self.settings.labelsColorMode(): boxColor = statusColor elif self.settings.ageColorMode(): @@ -931,7 +931,7 @@ def _getLabelsCount(nodeInfo): def _paintBottomLine(self, item): - if self.settings.labelsColorMode(): + if self.settings.labelsColorMode() or self.settings.statusColorMode(): self._addLabels(item) def _paintOval(self, item, statusColor): @@ -967,7 +967,7 @@ def _getNodeText(self, node): nodeText = nodeText[:37] + "..." if node.run: - expandedStr = '' if node.expanded else '\n ➕ %s more' % str(node.countChilds({})) + expandedStr = '' if node.expanded else '\n ➕ %s more' % str(node.countChildren({})) if self.runsView == VIEW_TREE_SMALL: nodeText = node.getName() + expandedStr else: @@ -978,7 +978,7 @@ def _getNodeText(self, node): def _addLabels(self, item): # If there is only one label it should be already used in the box color. - if self._getLabelsCount(item.nodeInfo) < 2: + if self._getLabelsCount(item.nodeInfo) < 1: return # Get the positions of the box (topLeftX, topLeftY, bottomRightX, @@ -1056,7 +1056,7 @@ def _toggleColorScheme(self, e=None): nextColorMode = currentMode + 1 self.settings.setColorMode(nextColorMode) - self._updateActionToolbar() + # WHY? 
self._updateActionToolbar() # self.updateRunsGraph() self.drawRunsGraph() self._infoAboutColorScheme() @@ -1283,7 +1283,7 @@ def _runItemTooltip(self, tw, item): tm += 'Status: %s\n' % prot.getStatusMessage() tm += 'Wall time: %s\n' % pwutils.prettyDelta(prot.getElapsedTime()) tm += 'CPU time: %s\n' % pwutils.prettyDelta(dt.timedelta(seconds=prot.cpuTime)) - tm += 'Folder size: %s\n' % pwutils.prettySize(prot.getSize()) + # tm += 'Folder size: %s\n' % pwutils.prettySize(prot.getSize()) if not hasattr(tw, 'tooltipText'): frame = tk.Frame(tw) @@ -1805,7 +1805,7 @@ def _selectNodes(self, down=True, fromRun=None): # Go in the direction . for run in nodesToSelect: # Choose the direction: down or up. - direction = run.getChilds if down else run.getParents + direction = run.getChildren if down else run.getParents # Select himself plus ancestors for parent in direction(): @@ -2054,7 +2054,7 @@ def _runActionClicked(self, action, event=None): def setVisibleNodes(self, node, visible=True): hasParentHidden = False - for child in node.getChilds(): + for child in node.getChildren(): prot = child.run nodeInfo = self.settings.getNodeById(prot.getObjId()) if visible: diff --git a/pyworkflow/gui/project/viewprotocols_extra.py b/pyworkflow/gui/project/viewprotocols_extra.py index 7fd1cc38b..04c8c2ffb 100644 --- a/pyworkflow/gui/project/viewprotocols_extra.py +++ b/pyworkflow/gui/project/viewprotocols_extra.py @@ -245,7 +245,7 @@ def pointerToInfo(): if obj is None or not obj.hasValue(): return None - if isinstance(obj, pwobj.String): + if isinstance(obj, pwobj.String) and not obj.getName(): info = stringToInfo() else: # All attributes are considered output, unless they are pointers @@ -504,7 +504,7 @@ def load(cls, domain, protocolsConf): except Exception as e: print('Failed to read settings. The reported error was:\n %s\n' - 'To solve it, fix %s and run again.' % e) + 'To solve it, fix %s and run again.' 
% (e, pluginName)) # Clean empty sections cls._hideEmptySections(protocols) @@ -557,4 +557,4 @@ def addSubMenu(self, text, value=None, shortCut=None, **args): return MenuConfig.addSubMenu(self, text, value, **args) def __str__(self): - return self.text \ No newline at end of file + return self.text diff --git a/pyworkflow/gui/widgets.py b/pyworkflow/gui/widgets.py index 8d935d9b6..235957299 100644 --- a/pyworkflow/gui/widgets.py +++ b/pyworkflow/gui/widgets.py @@ -161,8 +161,8 @@ def bindWidget(self, widget): widget.bind("", self.scrollV) widget.bind("", self.scrollV) - widget.bind("", self.scrollH) - widget.bind("", self.scrollH) + widget.bind("", self.scrollH) + widget.bind("", self.scrollH) def getVScroll(self): diff --git a/pyworkflow/mapper/sqlite.py b/pyworkflow/mapper/sqlite.py index b5b9697dd..91ec3f192 100644 --- a/pyworkflow/mapper/sqlite.py +++ b/pyworkflow/mapper/sqlite.py @@ -27,12 +27,12 @@ import re from collections import OrderedDict -from pyworkflow import Config +from pyworkflow import Config, ID_ATTRIBUTE, ID_COLUMN from pyworkflow.utils import replaceExt, joinExt, valueToList from .sqlite_db import SqliteDb, OperationalError from .mapper import Mapper -ID = 'id' +ID = ID_COLUMN CREATION = 'creation' PARENT_ID = 'parent_id' CLASSNAME = 'classname' @@ -210,7 +210,7 @@ def fillObjectWithRow(self, obj, objRow): rowId = objRow[ID] rowName = self._getStrValue(objRow['name']) - if not hasattr(obj, '_objId'): + if not hasattr(obj, ID_ATTRIBUTE): raise Exception("Entry '%s' (id=%s) in the database, stored as '%s'" ", is being mapped to %s object. 
" % (rowName, rowId, @@ -1356,8 +1356,8 @@ def setupCommands(self, objDict): def addCommonFieldsToMap(self): # Add common fields to the mapping - self._columnsMapping["id"] = "id" - self._columnsMapping["_objId"] = "id" + self._columnsMapping[ID_COLUMN] = ID_COLUMN + self._columnsMapping[ID_ATTRIBUTE] = ID_COLUMN def getClassRows(self): """ Create a dictionary with names of the attributes diff --git a/pyworkflow/object.py b/pyworkflow/object.py index 8cdd706a1..396834bc6 100644 --- a/pyworkflow/object.py +++ b/pyworkflow/object.py @@ -66,7 +66,7 @@ def __init__(self, value=None, **kwargs): self._objId = None # Unique identifier of this object in some context self._objParentId = None # identifier of the parent object self._objName = '' # The name of the object will contains the whole path of ancestors - self._objLabel = '' # This will serve to label the objects + self._objLabel = '' # This will serve to label the objects self._objComment = '' # Performance: self._objTag = None # This attribute serve to make some annotation on the object. self._objDoStore = True diff --git a/pyworkflow/plugin.py b/pyworkflow/plugin.py index 83b8f1683..991154dc7 100644 --- a/pyworkflow/plugin.py +++ b/pyworkflow/plugin.py @@ -124,6 +124,7 @@ def registerPlugin(cls, name): (pwutils.yellow("WARNING!!: Plugin containing module %s does not import properly. " "All its content will be missing in this execution." % name)) logger.info("Please, contact developers at %s and send this ugly information below. They'll understand it!." 
% DOCSITEURLS.CONTACTUS) + logger.info("Error message: %s"% str(e)) logger.info(pwutils.yellow(traceback.format_exc())) @classmethod diff --git a/pyworkflow/project/config.py b/pyworkflow/project/config.py index f2aecc8d8..d018165dd 100644 --- a/pyworkflow/project/config.py +++ b/pyworkflow/project/config.py @@ -40,7 +40,7 @@ class ProjectSettings(pwobj.Object): COLOR_MODE_LABELS = 1 COLOR_MODE_AGE = 2 COLOR_MODE_SIZE = 3 - COLOR_MODES = (COLOR_MODE_STATUS, COLOR_MODE_LABELS, COLOR_MODE_AGE, COLOR_MODE_SIZE) + COLOR_MODES = (COLOR_MODE_STATUS, COLOR_MODE_LABELS, COLOR_MODE_AGE) # This has poor performance many cases, COLOR_MODE_SIZE) def __init__(self, confs={}, **kwargs): super().__init__(**kwargs) @@ -112,6 +112,9 @@ def getColorMode(self): def setColorMode(self, colorMode): """ Set the color mode to use when drawing the graph. """ + # Skip LABELS color mode to avoid double iteration + if colorMode == self.COLOR_MODE_LABELS: + colorMode+=1 self.colorMode.set(colorMode) def statusColorMode(self): diff --git a/pyworkflow/project/manager.py b/pyworkflow/project/manager.py index 1cbafdb52..56ce4c5d2 100644 --- a/pyworkflow/project/manager.py +++ b/pyworkflow/project/manager.py @@ -97,9 +97,9 @@ def listProjects(self, sortByDate=True): return projList def createProject(self, projectName, runsView=1, - hostsConf=None, protocolsConf=None, location=None): + hostsConf=None, protocolsConf=None, location=None, comment=None): """Create a new project. 
- confs dict can contains customs .conf files + confs dict can contain customs .conf files for: menus, protocols, or hosts """ # Clean project name from undesired characters @@ -118,7 +118,8 @@ def createProject(self, projectName, runsView=1, project = Project(pw.Config.getDomain(), projectPath) project.create(runsView=runsView, hostsConf=hostsConf, - protocolsConf=protocolsConf) + protocolsConf=protocolsConf, + comment=comment) # If location is not the default one create a symlink on self.PROJECTS directory if projectPath != self.getProjectPath(projectName): # JMRT: Let's create the link to the absolute path, since relative diff --git a/pyworkflow/project/project.py b/pyworkflow/project/project.py index ea64a0d08..cc69b83ef 100644 --- a/pyworkflow/project/project.py +++ b/pyworkflow/project/project.py @@ -106,9 +106,11 @@ def __init__(self, domain, path): self.settings:config.ProjectSettings = None # Host configuration self._hosts = None + # Creation time should be stored in project.sqlite when the project # is created and then loaded with other properties from the database self._creationTime = None + # Time stamp with the last run has been updated self._lastRunTime = None @@ -145,7 +147,16 @@ def getCreationTime(self): """ Return the time when the project was created. """ # In project.create method, the first object inserted # in the mapper should be the creation time - return self._creationTime + return self._creationTime.datetime() + + + def getComment(self): + """ Returns the project comment. Stored as CreationTime comment.""" + return self._creationTime.getObjComment() + + def setComment(self, newComment): + """ Sets the project comment """ + self._creationTime.setObjComment(newComment) def getSettingsCreationTime(self): return self.settings.getCreationTime() @@ -154,7 +165,7 @@ def getElapsedTime(self): """ Returns the time elapsed from the creation to the last execution time. 
""" if self._creationTime and self._lastRunTime: - creationTs = self._creationTime + creationTs = self.getCreationTime() lastRunTs = self._lastRunTime.datetime() return lastRunTs - creationTs return None @@ -293,12 +304,16 @@ def _loadCreationTime(self): creationTime = self.mapper.selectBy(name=PROJECT_CREATION_TIME) if creationTime: # CreationTime was found in project.sqlite - self._creationTime = creationTime[0].datetime() + ctStr = creationTime[0] # This is our String type instance + + # We store it in mem as dateime + self._creationTime = ctStr + else: # We should read the creation time from settings.sqlite and # update the CreationTime in the project.sqlite - self._creationTime = self.getSettingsCreationTime() - self._storeCreationTime(self._creationTime) + self._creationTime = pwobj.String(self.getSettingsCreationTime()) + self._storeCreationTime() # ---- Helper functions to load different pieces of a project def _loadDb(self, dbPath): @@ -360,7 +375,7 @@ def getProtocolView(self): return self.settings.getProtocolView() def create(self, runsView=1, readOnly=False, hostsConf=None, - protocolsConf=None): + protocolsConf=None, comment=None): """Prepare all required paths and files to create a new project. :param runsView: default view to associate the project with @@ -376,7 +391,9 @@ def create(self, runsView=1, readOnly=False, hostsConf=None, # Create db through the mapper self.mapper = self.createMapper(self.dbPath) # Store creation time - self._storeCreationTime(dt.datetime.now()) + self._creationTime = pwobj.String(dt.datetime.now()) + self.setComment(comment) + self._storeCreationTime() # Load settings from .conf files and write .sqlite self.settings = self.createSettings(runsView=runsView, readOnly=readOnly) @@ -386,12 +403,11 @@ def create(self, runsView=1, readOnly=False, hostsConf=None, self._loadHosts(hostsConf) - def _storeCreationTime(self, creationTime): + def _storeCreationTime(self, new=True): """ Store the creation time in the project db. 
""" # Store creation time - creation = pwobj.String(objName=PROJECT_CREATION_TIME) - creation.set(creationTime) - self.mapper.insert(creation) + self._creationTime.setName(PROJECT_CREATION_TIME) + self.mapper.store(self._creationTime) self.mapper.commit() def _cleanData(self): @@ -820,7 +836,7 @@ def _getProtocolsDependencies(self, protocols): for prot in protocols: node = runsGraph.getNode(prot.strId()) if node: - childs = [node.run for node in node.getChilds() if + childs = [node.run for node in node.getChildren() if self.__validDependency(prot, node.run, protocols)] if childs: deps = [' ' + c.getRunName() for c in childs] @@ -839,7 +855,7 @@ def _getProtocolDescendents(self, protocol): visitedNodes[int(node.getName())] = node def getDescendents(rootNode): - for child in rootNode.getChilds(): + for child in rootNode.getChildren(): if int(child.getName()) not in visitedNodes: visitedNodes[int(child.getName())] = child getDescendents(child) @@ -998,7 +1014,7 @@ def _getSubworkflow(self, protocol, fixProtParam=True, getStopped=True): affectedProtocolsActive[protocol.getObjId()] = protocol node = runGraph.getNode(protocol.strId()) - dependencies = [node.run for node in node.getChilds()] + dependencies = [node.run for node in node.getChildren()] for dep in dependencies: if not dep.getObjId() in auxProtList: auxProtList.append([dep.getObjId(), level]) @@ -1034,7 +1050,7 @@ def deleteProtocolOutput(self, protocol, output): node = self.getRunsGraph().getNode(protocol.strId()) deps = [] - for node in node.getChilds(): + for node in node.getChildren(): for _, inputObj in node.run.iterInputAttributes(): value = inputObj.get() if (value is not None and @@ -1187,7 +1203,7 @@ def copyProtocol(self, protocol): node = g.getNode(prot.strId()) newProt = newDict[prot.getObjId()] - for childNode in node.getChilds(): + for childNode in node.getChildren(): newChildProt = newDict.get(childNode.run.getObjId(), None) if newChildProt: @@ -1246,7 +1262,7 @@ def getProtocolsDict(self, 
protocols=None, namesOnly=False): protId = prot.getObjId() node = g.getNode(prot.strId()) - for childNode in node.getChilds(): + for childNode in node.getChildren(): childId = childNode.run.getObjId() childProt = childNode.run if childId in newDict: @@ -1705,7 +1721,7 @@ def _checkInputAttr(node, pointed): parentNode.addChild(node) if os.environ.get('CHECK_CYCLIC_REDUNDANCY') and self._checkCyclicRedundancy(parentNode, node): conflictiveNodes = set() - for child in node.getChilds(): + for child in node.getChildren(): if node in child._parents: child._parents.remove(node) conflictiveNodes.add(child) @@ -1714,7 +1730,7 @@ def _checkInputAttr(node, pointed): child.getLabel() + '(' + child.getName() + ')')) for conflictNode in conflictiveNodes: - node._childs.remove(conflictNode) + node._children.remove(conflictNode) return False return True @@ -1747,7 +1763,7 @@ def _checkCyclicRedundancy(parent, child): def depthFirstSearch(node): visitedNodes.add(node) recursionStack.add(node) - for child in node.getChilds(): + for child in node.getChildren(): if child not in visitedNodes: if depthFirstSearch(child): return True diff --git a/pyworkflow/project/scripts/create.py b/pyworkflow/project/scripts/create.py index ace576704..b58ac3fd6 100644 --- a/pyworkflow/project/scripts/create.py +++ b/pyworkflow/project/scripts/create.py @@ -31,24 +31,28 @@ from pyworkflow.project import Manager import pyworkflow.utils as pwutils +EMPTY_ARG = "-" + def usage(error): + print(""" ERROR: %s - Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION] + Usage: scipion python -m pyworkflow.project.scripts.create NAME [WORKFLOW] [LOCATION] [COMMENT] NAME: project name WORKFLOW: path to a Scipion json workflow LOCATION: where to create it, defaults to scipion default location + COMMENT: project comment, location is mandatory in this case... 
for a NULL LOCATION pass %s This script will create a project project, optionally based on a workflow file - """ % error) + """ % (error, EMPTY_ARG)) sys.exit(1) n = len(sys.argv) -if n < 2 or n > 4: +if n < 2 or n > 5: usage("Incorrect number of input parameters") projName = sys.argv[1] @@ -56,6 +60,12 @@ def usage(error): jsonFile = None if n < 3 else os.path.abspath(sys.argv[2]) location = None if n < 4 else sys.argv[3] +# Location with - is None +if location == EMPTY_ARG: + location = None + +comment = None if n < 5 else sys.argv[4] + # This might not be working anymore for python3. # I'm getting invalid ELF header triggered by matplotlib -->from . import _tkagg # path = pw.join('gui', 'no-tkinter') @@ -71,7 +81,7 @@ def usage(error): if jsonFile is not None and not os.path.exists(jsonFile): usage("Nonexistent json file: %s" % pwutils.red(jsonFile)) -project = manager.createProject(projName, location=location) +project = manager.createProject(projName, location=location, comment=comment) if jsonFile is not None: protDict = project.loadProtocols(jsonFile) diff --git a/pyworkflow/project/scripts/schedule.py b/pyworkflow/project/scripts/schedule.py index f8e7e93fa..8885ba84f 100644 --- a/pyworkflow/project/scripts/schedule.py +++ b/pyworkflow/project/scripts/schedule.py @@ -92,7 +92,7 @@ def usage(error): # and the graph is lineal for root in roots: - for child in root.getChilds(): + for child in root.getChildren(): workflow, _ = project._getSubworkflow(child.run) for prot, level in workflow.values(): protLabelName = prot.getObjLabel() diff --git a/pyworkflow/protocol/constants.py b/pyworkflow/protocol/constants.py index 721fbad95..a56eb9dbf 100644 --- a/pyworkflow/protocol/constants.py +++ b/pyworkflow/protocol/constants.py @@ -75,6 +75,7 @@ # Param names for GPU processing USE_GPU = 'useGpu' GPU_LIST = 'gpuList' +VOID_GPU = 99 # Job management UNKNOWN_JOBID = -1 diff --git a/pyworkflow/protocol/executor.py b/pyworkflow/protocol/executor.py index 
586bb6407..6fa2a4196 100644 --- a/pyworkflow/protocol/executor.py +++ b/pyworkflow/protocol/executor.py @@ -69,7 +69,7 @@ def runJob(self, log, programName, params, numberOfMpi, numberOfThreads, self.hostConfig, env=env, cwd=cwd, gpuList=self.getGpuList(), executable=executable) - + def _getRunnable(self, steps, n=1): """ Return the n steps that are 'new' and all its dependencies have been finished, or None if none ready. @@ -79,11 +79,16 @@ def _getRunnable(self, steps, n=1): for s in steps: if (s.getStatus() == cts.STATUS_NEW and all(steps[i-1].isFinished() for i in s._prerequisites)): - rs.append(s) - if len(rs) == n: - break + + if self._isStepRunnable(s): + rs.append(s) + if len(rs) == n: + break return rs - + def _isStepRunnable(self, step): + """ Should be implemented by inherited classes to test extra conditions """ + return True + def _arePending(self, steps): """ Return True if there are pending steps (either running or waiting) that can be done and thus enable other steps to be executed. @@ -169,28 +174,133 @@ def __init__(self, hostConfig, nThreads, **kwargs): # all the threads self.gpuDict = {} + self._assignGPUperNode() + + def _assignGPUperNode(self): + # If we have GPUs if self.gpuList: - nodes = range(nThreads) + + nThreads = self.numberOfProcs + + # Nodes: each concurrent steps + nodes = range(1, nThreads+1) + + # Number of GPUs nGpu = len(self.gpuList) + # If more GPUs than threads if nGpu > nThreads: - chunk = int(nGpu / nThreads) - for i, node in enumerate(nodes): - self.gpuDict[node] = list(self.gpuList[i*chunk:(i+1)*chunk]) + + # Get the ratio: 2 GPUs per thread? 3 GPUs per thread? 
+ # 3 GPU and 2 threads is rounded to 1 (flooring) + step = int(nGpu / nThreads) + spare = nGpu % nThreads + fromPos = 0 + # For each node(concurrent thread) + for node in nodes: + # Store the GPUS per thread: + # GPUs: 0 1 2 + # Threads 2 (step 1) + # Node 0 : GPU 0 1 + # Node 1 : GPU 2 + + extraGpu = 1 if spare>0 else 0 + toPos = fromPos + step +extraGpu + gpusForNode = list(self.gpuList[fromPos:toPos]) + + newGpusForNode = self.cleanVoidGPUs(gpusForNode) + if len(newGpusForNode) == 0: + logger.info("Gpu slot cancelled: all were null Gpus -> %s" % gpusForNode) + else: + logger.info("GPUs %s assigned to node %s" % (newGpusForNode, node)) + self.gpuDict[-node] = newGpusForNode + + fromPos = toPos + spare-=1 + else: # Expand gpuList repeating until reach nThreads items if nThreads > nGpu: - newList = self.gpuList * (int(nThreads/nGpu)+1) - self.gpuList = newList[:nThreads] - - for node, gpu in zip(nodes, self.gpuList): - self.gpuDict[node] = [gpu] + logger.warning("GPUs are no longer extended. If you want all GPUs to match threads repeat as many " + "GPUs as threads.") + # newList = self.gpuList * (int(nThreads / nGpu) + 1) + # self.gpuList = newList[:nThreads] + + for index, gpu in enumerate(self.gpuList): + + if gpu == cts.VOID_GPU: + logger.info("Void GPU (%s) found in the list. Skipping the slot." % cts.VOID_GPU) + else: + logger.info("GPU slot for gpu %s." % gpu) + # Any negative number in the key means a free gpu slot. can't be 0! + self.gpuDict[-index-1] = [gpu] + + def cleanVoidGPUs(self, gpuList): + newGPUList=[] + for gpuid in gpuList: + if gpuid == cts.VOID_GPU: + logger.info("Void GPU detected in %s" % gpuList) + else: + newGPUList.append(gpuid) + return newGPUList def getGpuList(self): """ Return the GPU list assigned to current thread or empty list if not using GPUs. """ - return self.gpuDict.get(threading.current_thread().thId, []) - + + # If the node id has assigned gpus? 
+ nodeId = threading.current_thread().thId + if nodeId in self.gpuDict: + gpus = self.gpuDict.get(nodeId) + logger.info("Reusing GPUs (%s) slot for %s" % (gpus, nodeId)) + return gpus + else: + + gpus = self.getFreeGpuSlot(nodeId) + if gpus is None: + logger.warning("Step on node %s is requesting GPUs but there isn't any available. Review configuration of threads/GPUs. Returning and empty list." % nodeId) + return [] + else: + return gpus + def getFreeGpuSlot(self, nodeId=None): + """ Returns a free gpu slot available or None. If node is passed it also reserves it for that node + + :param node: node to make the reserve of Gpus + """ + for node in self.gpuDict.keys(): + # This is a free node. Book it + if node < 0: + gpus = self.gpuDict[node] + + if nodeId is not None: + self.gpuDict.pop(node) + self.gpuDict[nodeId] = gpus + logger.info("GPUs %s assigned to thread %s" % (gpus, nodeId)) + else: + logger.info("Free gpu slot found at %s" % node) + return gpus + + return None + def freeGpusSlot(self, node): + gpus = self.gpuDict.get(node, None) + + # Some nodes/threads do not use gpus so may not be booked and not in the dictionary + if gpus is not None: + self.gpuDict.pop(node) + self.gpuDict[-node-1] = gpus + logger.info("GPUs %s freed from thread %s" % (gpus, node)) + else: + logger.debug("node %s not found in GPU slots" % node) + + def _isStepRunnable(self, step): + """ Overwrite this method to check GPUs availability""" + + if self.gpuList and step.needsGPU() and self.getFreeGpuSlot() is None: + logger.info("Can't run step %s. 
Needs gpus and there are no free gpu slots" % step) + return False + + return True + def runSteps(self, steps, stepStartedCallback, stepFinishedCallback, @@ -213,7 +323,9 @@ def runSteps(self, steps, sharedLock = threading.Lock() runningSteps = {} # currently running step in each node ({node: step}) - freeNodes = list(range(self.numberOfProcs)) # available nodes to send jobs + freeNodes = list(range(1, self.numberOfProcs+1)) # available nodes to send jobs + logger.info("Execution threads: %s" % freeNodes) + logger.info("Running steps using %s threads. 1 thread is used for this main proccess." % self.numberOfProcs) while True: # See which of the runningSteps are not really running anymore. @@ -225,6 +337,7 @@ def runSteps(self, steps, for node in nodesFinished: step = runningSteps.pop(node) # remove entry from runningSteps freeNodes.append(node) # the node is available now + self.freeGpusSlot(node) # Notify steps termination and check if we should continue doContinue = stepFinishedCallback(step) if not doContinue: @@ -245,8 +358,9 @@ def runSteps(self, steps, anyLaunched = True step.setRunning() stepStartedCallback(step) - node = freeNodes.pop() # take an available node + node = freeNodes.pop(0) # take an available node runningSteps[node] = step + logger.debug("Running step %s on node %s" % (step, node)) t = StepThread(node, step, sharedLock) # won't keep process up if main thread ends t.daemon = True @@ -255,7 +369,7 @@ def runSteps(self, steps, if not anyLaunched: if anyPending: # nothing running - time.sleep(0.5) + time.sleep(3) else: break # yeah, we are done, either failed or finished :) diff --git a/pyworkflow/protocol/params.py b/pyworkflow/protocol/params.py index 9934e1c84..4d1911293 100644 --- a/pyworkflow/protocol/params.py +++ b/pyworkflow/protocol/params.py @@ -345,18 +345,15 @@ def addParallelSection(self, threads=1, mpi=8, condition="", self.addParam('hostName', StringParam, default="localhost", label='Execution host', help='Select in which of the 
available do you want to launch this protocol.') + + # NOTE: help messages for these parameters is defined at HELP_MPI_THREADS and used in form.py. + if threads > 0: self.addParam('numberOfThreads', IntParam, default=threads, - label='Threads', - help='This option provides shared-memory parallelization on multi-core machines.' - 'It does not require any additional software.') + label='Threads') if mpi > 0: self.addParam('numberOfMpi', IntParam, default=mpi, - label='MPI processes', - help='This option provides the number of independent processes spawned' - 'in parallel by command in a cluster, usually through' - 'a queue system. This will require that you have compile this software ' - 'with support.') + label='MPI processes') if jobsize > 0: self.addParam('mpiJobSize', IntParam, default=jobsize, label='MPI job size', condition="numberOfMpi>1", diff --git a/pyworkflow/protocol/protocol.py b/pyworkflow/protocol/protocol.py index 42777a08e..cbef03bb7 100644 --- a/pyworkflow/protocol/protocol.py +++ b/pyworkflow/protocol/protocol.py @@ -25,8 +25,7 @@ This modules contains classes required for the workflow execution and tracking like: Step and Protocol """ -import contextlib -import sys, os +import os import json import threading import time @@ -36,7 +35,7 @@ from pyworkflow.exceptions import ValidationException, PyworkflowException from pyworkflow.object import * import pyworkflow.utils as pwutils -from pyworkflow.utils.log import LoggingConfigurator, getExtraLogInfo, STATUS, setDefaultLoggingContext +from pyworkflow.utils.log import getExtraLogInfo, STATUS, setDefaultLoggingContext from .executor import StepExecutor, ThreadStepExecutor, QueueStepExecutor from .constants import * from .params import Form @@ -50,21 +49,24 @@ class Step(Object): """ Basic execution unit. - It should defines its Input, Output + It should define its Input, Output and define a run method. 
""" - def __init__(self, **kwargs): - Object.__init__(self, **kwargs) + def __init__(self, interactive=False, needsGPU=True, **kwargs): + super().__init__() self._prerequisites = CsvList() # which steps needs to be done first self.status = String() self.initTime = String() self.endTime = String() self._error = String() - self.interactive = Boolean(False) + self.interactive = Boolean(interactive) self._resultFiles = String() + self._needsGPU = Boolean(needsGPU) self._index = None + def needsGPU(self) -> bool: + return self._needsGPU.get() def getIndex(self): return self._index @@ -214,7 +216,7 @@ def run(self): self.status.set(status) except PyworkflowException as e: - print(pwutils.redStr(str(e))) + logger.info(pwutils.redStr(str(e))) self.setFailed(str(e)) except Exception as e: self.setFailed(str(e)) @@ -230,7 +232,7 @@ class FunctionStep(Step): This class will ease the insertion of Protocol function steps through the function _insertFunctionStep""" - def __init__(self, func=None, funcName=None, *funcArgs, **kwargs): + def __init__(self, func=None, funcName=None, *funcArgs, wait=False, interactive=False, needsGPU=True): """ Params: func: the function that will be executed. @@ -238,13 +240,12 @@ def __init__(self, func=None, funcName=None, *funcArgs, **kwargs): *funcArgs: argument list passed to the function (serialized and stored) **kwargs: extra parameters. 
""" - Step.__init__(self) + super().__init__(interactive=interactive, needsGPU=needsGPU) self._func = func # Function should be set before run self._args = funcArgs self.funcName = String(funcName) self.argsStr = String(json.dumps(funcArgs, default=lambda x: None)) - self.setInteractive(kwargs.get('interactive', False)) - if kwargs.get('wait', False): + if wait: self.setStatus(STATUS_WAITING) def _runFunc(self): @@ -280,7 +281,7 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - return self.funcName.get() + return "%s - %s" % (self._objId ,self.funcName.get()) class RunJobStep(FunctionStep): @@ -1016,12 +1017,11 @@ def _defineParams(self, form): """ pass - def __insertStep(self, step, **kwargs): + def __insertStep(self, step, prerequisites=None): """ Insert a new step in the list. :param prerequisites: a single integer or a list with the steps index that need to be done previous to the current one.""" - prerequisites = kwargs.get('prerequisites', None) if prerequisites is None: if len(self._steps): @@ -1117,7 +1117,7 @@ def _getBasePath(self, path): """ return self._getPath(os.path.basename(path)) - def _insertFunctionStep(self, func, *funcArgs, **kwargs): + def _insertFunctionStep(self, func, *funcArgs, prerequisites=None, wait=False, interactive=False, needsGPU=True): """ Params: func: the function itself or, optionally, the name (string) of the function to be run in the Step. 
@@ -1135,24 +1135,24 @@ def _insertFunctionStep(self, func, *funcArgs, **kwargs): if not callable(func): raise Exception("Protocol._insertFunctionStep: '%s' is not callable" % func) - step = FunctionStep(func, func.__name__, *funcArgs, **kwargs) - - return self.__insertStep(step, **kwargs) - - def _insertRunJobStep(self, progName, progArguments, resultFiles=[], - **kwargs): - """ Insert an Step that will simple call runJob function - **args: see __insertStep - """ - return self._insertFunctionStep('runJob', progName, progArguments, - **kwargs) - - def _insertCopyFileStep(self, sourceFile, targetFile, **kwargs): - """ Shortcut function to insert an step for copying a file to a destiny. """ - step = FunctionStep(pwutils.copyFile, 'copyFile', sourceFile, - targetFile, - **kwargs) - return self.__insertStep(step, **kwargs) + step = FunctionStep(func, func.__name__, *funcArgs, wait=wait, interactive=interactive, needsGPU=needsGPU) + + return self.__insertStep(step,prerequisites) + + # def _insertRunJobStep(self, progName, progArguments, resultFiles=[], + # **kwargs): + # """ Insert an Step that will simple call runJob function + # **args: see __insertStep + # """ + # return self._insertFunctionStep('runJob', progName, progArguments, + # **kwargs) + # + # def _insertCopyFileStep(self, sourceFile, targetFile, **kwargs): + # """ Shortcut function to insert a step for copying a file to a destiny. """ + # step = FunctionStep(pwutils.copyFile, 'copyFile', sourceFile, + # targetFile, + # **kwargs) + # return self.__insertStep(step, **kwargs) def _enterDir(self, path): """ Enter into a new directory path and store the current path. @@ -1216,7 +1216,7 @@ def _insertPreviousSteps(self): protocol that allow some kind of continue (such as ctf estimation). 
""" for step in self.loadSteps(): - self.__insertStep(step) + self.__insertStep(step, ) def __findStartingStep(self): """ From a previous run, compare self._steps and self._prevSteps @@ -2538,7 +2538,7 @@ def __init__(self, **kwargs): self.stepsExecutionMode = STEPS_PARALLEL def _insertAllSteps(self): # Insert the step that generates the steps - self._insertFunctionStep(self.resumableStepGeneratorStep, str(datetime.now())) + self._insertFunctionStep(self.resumableStepGeneratorStep, str(datetime.now()), needsGPU=False) def resumableStepGeneratorStep(self, ts): """ This allow to resume protocols. ts is the time stamp so this stap is alway different form previous exceution""" diff --git a/pyworkflow/template.py b/pyworkflow/template.py index bf0019f50..03fe077d9 100644 --- a/pyworkflow/template.py +++ b/pyworkflow/template.py @@ -10,7 +10,7 @@ logger = logging.getLogger(__name__) class Template: - def __init__(self, source, name, description): + def __init__(self, source, name, description=""): self.source = source # Tidy up templates names: removing .json.template and .json (when passed as parameter) self.name = name diff --git a/pyworkflow/utils/graph.py b/pyworkflow/utils/graph.py index f9de517a6..0b0698c41 100644 --- a/pyworkflow/utils/graph.py +++ b/pyworkflow/utils/graph.py @@ -26,6 +26,8 @@ """ This module define a Graph class and some utilities """ +import logging +logger = logging.getLogger(__name__) class Node(object): @@ -33,7 +35,7 @@ class Node(object): _count = 1 def __init__(self, name=None, label=None): - self._childs = [] + self._children = [] self._parents = [] if name is None: @@ -44,6 +46,7 @@ def __init__(self, name=None, label=None): if label is None: label = name self._label = label + self._fixed = False def getName(self): return self._name @@ -57,13 +60,13 @@ def setLabel(self, newLabel): def isRoot(self): return len(self._parents) == 0 - def getChilds(self): - return self._childs + def getChildren(self): + return self._children def 
addChild(self, *nodes): for n in nodes: - if n not in self._childs: - self._childs.append(n) + if n not in self._children: + self._children.append(n) n._parents.append(self) def getParent(self): @@ -78,36 +81,36 @@ def getParent(self): def getParents(self): return self._parents - def iterChilds(self): - """ Iterate over all childs and subchilds. - Nodes can be visited more than once if have + def iterChildren(self): + """ Iterate over all children and sub-children. + Nodes can be visited more than once if it has more than one parent. """ - for child in self._childs: - for c in child.iterChilds(): + for child in self._children: + for c in child.iterChildren(): yield c yield self - def countChilds(self, visitedNode=None, count=0): + def countChildren(self, visitedNode=None, count=0): """ Iterate over all childs and subchilds. Nodes can be visited once """ - for child in self._childs: + for child in self._children: if child._name not in visitedNode: visitedNode[child._name] = True - child.countChilds(visitedNode) + child.countChildren(visitedNode) return len(visitedNode) - def iterChildsBreadth(self): + def iterChildrenBreadth(self): """ Iter child nodes in a breadth-first order """ - for child in self._childs: + for child in self._children: yield child - for child in self._childs: - for child2 in child.iterChildsBreadth(): + for child in self._children: + for child2 in child.iterChildrenBreadth(): yield child2 def __str__(self): @@ -133,13 +136,13 @@ def __init__(self, rootName='ROOT', root=None): def _registerNode(self, node): self._nodes.append(node) self._nodesDict[node.getName()] = node - for child in node.getChilds(): + for child in node.getChildren(): self._registerNode(child) - def getRoot(self): + def getRoot(self) -> Node: return self._root - def createNode(self, nodeName, nodeLabel=None): + def createNode(self, nodeName, nodeLabel=None) -> Node: """ Add a node to the graph """ node = Node(nodeName, nodeLabel) self._registerNode(node) @@ -150,7 +153,7 @@ def 
aliasNode(self, node, aliasName): """ Register an alias name for the node. """ self._nodesDict[aliasName] = node - def getNode(self, nodeName): + def getNode(self, nodeName) -> Node: return self._nodesDict.get(nodeName, None) def getNodeNames(self): @@ -164,33 +167,3 @@ def getRootNodes(self): """ Return all nodes that have no parent. """ return [n for n in self._nodes if n.isRoot()] - def printNodes(self): - for node in self.getNodes(): - print("Node: ", node) - print(" Childs: ", ','.join([c.getLabel() - for c in node.getChilds()])) - - def _escape(self, label): - return label.replace('.', '_').replace(' ', '_').replace('-', '_').replace('___', '_') - - def printDot(self, useId=True): - """ If useId is True, use the node id for label the graph. - If not, use the run name. - """ - - def getLabel(node): - if useId: - return node.getName() - else: - return node.getLabel() - - dotStr = "\ndigraph {\n" - - for node in self.getNodes(): - for child in node.getChilds(): - nodeLabel = self._escape(getLabel(node)) - childLabel = self._escape(getLabel(child)) - dotStr += " %s -> %s;\n" % (nodeLabel, childLabel) - dotStr += "}" - print(dotStr) - return dotStr diff --git a/pyworkflow/utils/properties.py b/pyworkflow/utils/properties.py index 51e4c4041..3c35d870f 100644 --- a/pyworkflow/utils/properties.py +++ b/pyworkflow/utils/properties.py @@ -29,7 +29,7 @@ This module defines the text used in the application. """ # NOTE: DO NOT REMOVE UNTIL plugin manager uses Config.SCIPION_MAIN_COLOR and is released -from pyworkflow.constants import Color +from pyworkflow.constants import Color, DOCSITEURLS from PIL import Image class Message: @@ -124,6 +124,7 @@ class Message: LABEL_PARALLEL = 'Parallel' LABEL_HOST = 'Host' LABEL_THREADS = 'Threads' + LABEL_SCIPION_THREADS = 'Scipion threads' LABEL_MPI = 'MPI' LABEL_QUEUE = 'Use a queue engine?' @@ -140,18 +141,21 @@ class Message: the *Continue* execution mode will try to continue from the last completed step. 
On the other hand, the *Restart* mode will clean the whole run directory and start from scratch. - """ - - HELP_MPI_THREADS = """ -Define the number of processors to be used in the execution. -*MPI*: This is a number of independent processes - that communicate through message passing - over the network (or the same computer). -*Threads*: This refers to different execution threads - in the same process that can share memory. They run in - the same computer. """ + HELP_PARALLEL_HEADER = 'Define the number of processors to be used in the execution.\nCheck %s for more detailed info.\n\n' % DOCSITEURLS.THREADS_MPIS_AND_GPUS + HELP_PARALLEL_MPI = ("*MPI*:\nThis is a number of independent processes" + " that communicate through message passing " + "over the network (or the same computer).\n") + HELP_PARALLEL_THREADS = ("*Threads*:\nThis refers to different execution threads in the same process that " + "can share memory. They run in the same computer. This value is an argument" + " passed to the program integrated") + + HELP_SCIPION_THREADS = ("*Scipion threads*:\n threads created by Scipion to run the steps." + " 1 thread is always used by the master/main process. Then extra threads will allow" + " this protocol to run several steps at the same time, taking always into account " + "restrictions to previous steps and 'theoretical GPU availability'") + HELP_USEQUEUE = """ Click Yes if you want to send this execution to a queue engine like Slurm, Torque, ... 
The queue commands to launch and stop jobs should be configured at @@ -446,7 +450,7 @@ class Sprite: @classmethod def getSpritesFile(cls): from pyworkflow import Config - return Config.SCIPION_SPRITES_FILE + return Config.getSpritesFile() @classmethod def loadSprites(cls): """ Loads the image of the sprite""" diff --git a/pyworkflowtests/protocols.py b/pyworkflowtests/protocols.py index 71ee38d0b..62728e98a 100644 --- a/pyworkflowtests/protocols.py +++ b/pyworkflowtests/protocols.py @@ -55,16 +55,16 @@ def sleepStep(self, t): def _insertAllSteps(self): print("Inserting all steps...") for i in range(self.numberOfSleeps.get()): - self._insertFunctionStep('sleepStep', i + 1) + self._insertFunctionStep(self.sleepStep, i + 1) class ParallelSleepingProtocol(SleepingProtocol): def _insertAllSteps(self): - step1 = self._insertFunctionStep('sleepStep', 1) + step1 = self._insertFunctionStep(self.sleepStep, 1) n = 2 deps = [step1] for i in range(n): - self._insertFunctionStep('sleepStep') + self._insertFunctionStep(self.sleepStep) class ConcurrencyProtocol(SleepingProtocol): """ Protocol to test concurrency access to sets""" diff --git a/pyworkflowtests/tests/test_protocol_execution.py b/pyworkflowtests/tests/test_protocol_execution.py index 9b2400c89..1bd4cae9e 100644 --- a/pyworkflowtests/tests/test_protocol_execution.py +++ b/pyworkflowtests/tests/test_protocol_execution.py @@ -22,11 +22,13 @@ # * e-mail address 'scipion@cnb.csic.es' # * # ************************************************************************** +import threading import pyworkflow.tests as pwtests import pyworkflow.mapper as pwmapper import pyworkflow.protocol as pwprot from pyworkflow.project import Project +from pyworkflow.protocol.constants import VOID_GPU # TODO: this test seems not to be finished. 
@@ -70,3 +72,64 @@ def test_StepExecutor(self): prot2 = mapper2.selectById(prot.getObjId()) self.assertEqual(prot.endTime.get(), prot2.endTime.get()) + + def test_gpuSlots(self): + """ Test gpu slots are properly composed in combination of threads""" + + # Test basic GPU setu methods + stepExecutor = pwprot.ThreadStepExecutor(None, 1, gpuList=None) + + + self.assertEqual(stepExecutor.cleanVoidGPUs([0,1]), [0,1], + "CleanVoidGpus does not work in absence of void GPUS") + + self.assertEqual(stepExecutor.cleanVoidGPUs([0, VOID_GPU]), [0], + "CleanVoidGpus does not work with a void GPU") + + self.assertEqual(stepExecutor.cleanVoidGPUs([VOID_GPU, VOID_GPU]), [], + "CleanVoidGpus does not work with all void GPU") + + + currThread = threading.currentThread() + currThread.thId = 1 + self.assertEqual(stepExecutor.getGpuList(),[], "Gpu list should be empty") + + # 2 threads 1 GPU + stepExecutor = pwprot.ThreadStepExecutor(None, 2, gpuList=[1]) + self.assertEqual(stepExecutor.getGpuList(),[1], "Gpu list should be [1]") + + currThread.thId = 2 + self.assertEqual(stepExecutor.getGpuList(),[], "Gpu list should be empty after a second request") + + + # 2 threads 3 GPUs + stepExecutor = pwprot.ThreadStepExecutor(None, 2, gpuList=[0,1,2]) + self.assertEqual(stepExecutor.getGpuList(),[0,1], "Gpu list should be [0,1]") + + currThread.thId = 1 + self.assertEqual(stepExecutor.getGpuList(),[2], "Gpu list should be [2] after a second request") + + + # 2 threads 4 GPUs with void gpus + stepExecutor = pwprot.ThreadStepExecutor(None, 2, gpuList=[0,1,2, VOID_GPU]) + self.assertEqual(stepExecutor.getGpuList(),[0,1], "Gpu list should be [0,1]") + + currThread.thId = 2 + self.assertEqual(stepExecutor.getGpuList(),[2], "Gpu list should be [2] after a second request without the void gpu") + + # less GPUs than threads. 
No extension should happen + stepExecutor = pwprot.ThreadStepExecutor(None, 4, gpuList=[0, VOID_GPU, 2]) + self.assertEqual(stepExecutor.getGpuList(), [0], "Gpu list should not be extended") + + currThread.thId = 1 + self.assertEqual(stepExecutor.getGpuList(), [2], + "Gpu list should be [2] after a second request, skipping the VOID gpu") + + currThread.thId = 3 + self.assertEqual(stepExecutor.getGpuList(), [], "Gpu list should be empty ather all GPU slots are busy") + + + + + +