From 9f149a50fa63e5d4e327f96f79bf79d45d50f2eb Mon Sep 17 00:00:00 2001
From: Patrick Lehmann
Date: Tue, 21 Mar 2023 20:59:59 +0100
Subject: [PATCH] New debugging technique in generating GraphML files of internal data structures.

---
 pyVHDLParser/Blocks/__init__.py | 12 ++---
 pyVHDLParser/CLI/Block.py       | 30 +++++++++++-
 pyVHDLParser/CLI/GraphML.py     | 46 ++++++++++++++++++
 pyVHDLParser/CLI/Token.py       | 86 +++++----------------------
 pyVHDLParser/CLI/VHDLParser.py  | 18 +++----
 5 files changed, 99 insertions(+), 93 deletions(-)
 create mode 100644 pyVHDLParser/CLI/GraphML.py

diff --git a/pyVHDLParser/Blocks/__init__.py b/pyVHDLParser/Blocks/__init__.py
index 7faee71b0..c54c8db31 100644
--- a/pyVHDLParser/Blocks/__init__.py
+++ b/pyVHDLParser/Blocks/__init__.py
@@ -241,7 +241,7 @@ class BlockIterator:
 	state: int  #: internal states: 0 = normal, 1 = reached stopBlock, 2 = reached EndOfBlock
 
-	def __init__(self, startBlock: 'Block', inclusiveStartBlock: bool=False, stopBlock: 'Block'=None):
+	def __init__(self, startBlock: 'Block', inclusiveStartBlock: bool=False, inclusiveStopBlock: bool=True, stopBlock: 'Block'=None):
 		self.startBlock = startBlock
 		self.currentBlock = startBlock if inclusiveStartBlock else startBlock.NextBlock
 		self.stopBlock = stopBlock
@@ -257,7 +257,7 @@ def __next__(self) -> 'Block':
 			raise StopIteration(self.state)
 
 		block = self.currentBlock
-		if block is self.stopToken:
+		if block is self.stopBlock:
 			self.currentBlock = None
 			self.state = 1
 		elif isinstance(self.currentBlock, EndOfBlock):
@@ -341,11 +341,11 @@ def __iter__(self) -> TokenIterator:
 		"""Returns a token iterator that iterates from :attr:`~Block.StartToken` to :attr:`~Block.EndToken`."""
 		return TokenIterator(self.StartToken, inclusiveStartToken=True, stopToken=self.EndToken)
 
-	def GetIterator(self, stopBlock: 'Block'=None) -> BlockIterator:
-		return BlockIterator(self, stopBlock=stopBlock)
+	def GetIterator(self, inclusiveStartBlock: bool = False, inclusiveStopBlock: bool = True, stopBlock: 'Block'=None) -> BlockIterator:
+		return BlockIterator(self, inclusiveStartBlock=inclusiveStartBlock, inclusiveStopBlock=inclusiveStopBlock, stopBlock=stopBlock)
 
-	def GetReverseIterator(self, stopBlock: 'Block'=None) -> BlockReverseIterator:
-		return BlockReverseIterator(self, stopBlock=stopBlock)
+	def GetReverseIterator(self, inclusiveStartBlock: bool = False, inclusiveStopBlock: bool = True, stopBlock: 'Block'=None) -> BlockReverseIterator:
+		return BlockReverseIterator(self, inclusiveStartBlock=inclusiveStartBlock, inclusiveStopBlock=inclusiveStopBlock, stopBlock=stopBlock)
 
 	def __str__(self) -> str:
 		buffer = ""
diff --git a/pyVHDLParser/CLI/Block.py b/pyVHDLParser/CLI/Block.py
index 244de1110..21ff7bd9a 100644
--- a/pyVHDLParser/CLI/Block.py
+++ b/pyVHDLParser/CLI/Block.py
@@ -31,6 +31,7 @@
 
 from pyAttributes.ArgParseAttributes import CommandAttribute
 
+from .GraphML import GraphML
 from ..Base import ParserException
 from ..Token import Token, StartOfDocumentToken, EndOfDocumentToken
 from ..Token.Parser import Tokenizer
@@ -53,6 +54,8 @@ class BlockStreamHandlers:
 	def HandleBlockStreaming(self: FrontEndProtocol, args):
 		self.PrintHeadline()
 
+		# self._writeLevel = Severity.Verbose
+
 		file = Path(args.Filename)
 
 		if not file.exists():
@@ -64,8 +67,22 @@ def HandleBlockStreaming(self: FrontEndProtocol, args):
 		tokenStream = Tokenizer.GetVHDLTokenizer(content)
 		blockStream = TokenToBlockParser.Transform(tokenStream)
 
+		blockIterator = iter(blockStream)
+		firstBlock = next(blockIterator)
+
 		try:
-			for block in blockStream:
+			while next(blockIterator):
+				pass
+		except StopIteration:
+			pass
+
+		if isinstance(firstBlock, StartOfDocumentBlock):
+			print("{YELLOW}{block!r}{NOCOLOR}".format(block=firstBlock, **self.Foreground))
+			print(" {YELLOW}{token!r}{NOCOLOR}".format(token=firstBlock.StartToken, **self.Foreground))
+
+		try:
+			blockIterator = firstBlock.GetIterator(inclusiveStopBlock=False)
+			for block in blockIterator:
 				if isinstance(block, (LinebreakBlock, IndentationBlock)):
 					self.WriteNormal("{DARK_GRAY}{block!r}{NOCOLOR}".format(block=block, **self.Foreground))
 				elif isinstance(block, CommentBlock):
@@ -84,11 +101,22 @@ def HandleBlockStreaming(self: FrontEndProtocol, args):
 					for token in block:
 						self.WriteVerbose(repr(token))
 
+			blockIterator = block.GetIterator()
+			lastBlock = next(blockIterator)
+			if isinstance(lastBlock, EndOfDocumentBlock):
+				print("{YELLOW}{block!r}{NOCOLOR}".format(block=lastBlock, **self.Foreground))
+				print(" {YELLOW}{token!r}{NOCOLOR}".format(token=lastBlock.StartToken, **self.Foreground))
+
 		except ParserException as ex:
 			print("{RED}ERROR: {0!s}{NOCOLOR}".format(ex, **self.Foreground))
 		except NotImplementedError as ex:
 			print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **self.Foreground))
 
+		exporter = GraphML()
+		exporter.AddTokenStream(firstBlock.StartToken)
+		# exporter.AddBlockStream(firstBlock)
+		exporter.WriteDocument(Path.cwd() / "temp/BlockStream.graphml")
+
 		self.exit()
 
 # ----------------------------------------------------------------------------
diff --git a/pyVHDLParser/CLI/GraphML.py b/pyVHDLParser/CLI/GraphML.py
new file mode 100644
index 000000000..42d47da04
--- /dev/null
+++ b/pyVHDLParser/CLI/GraphML.py
@@ -0,0 +1,46 @@
+from pyTooling.Graph import Graph, Subgraph, Vertex
+from pyTooling.Graph.GraphML import GraphMLDocument
+
+from pyVHDLParser.Token import Token
+
+
+class GraphML:
+	_graph: Graph
+
+	def __init__(self):
+		self._graph = Graph(name="Streams")
+
+	def AddTokenStream(self, firstToken: Token):
+		subgraph = Subgraph(name="TokenStream", graph=self._graph)
+
+		firstVertex = Vertex(vertexID=id(firstToken), value=f"{firstToken!s}", subgraph=subgraph)
+		firstVertex["order"] = 0
+		firstVertex["kind"] = type(firstToken).__name__
+
+		tokenIterator = firstToken.GetIterator(inclusiveStopToken=False)
+		for tokenID, token in enumerate(tokenIterator, start=1):
+			vertex = Vertex(vertexID=id(token), value=f"{token!s}", subgraph=subgraph)
+			vertex["order"] = tokenID
+			vertex["kind"] = type(token).__name__
+
+		tokenIterator = token.GetIterator()
+		lastToken = next(tokenIterator)
+		lastVertex = Vertex(vertexID=id(lastToken), value=f"{lastToken!s}", subgraph=subgraph)
+		lastVertex["order"] = tokenID + 1
+		lastVertex["kind"] = type(lastToken).__name__
+
+		firstVertex.EdgeToVertex(subgraph._verticesWithID[id(firstToken.NextToken)], edgeID=f"n0_next")
+		tokenIterator = firstToken.GetIterator(inclusiveStopToken=False)
+		for tokenID, token in enumerate(tokenIterator, start=1):
+			vertex = subgraph._verticesWithID[id(token)]
+			vertex.EdgeToVertex(subgraph._verticesWithID[id(token.PreviousToken)], edgeID=f"n{tokenID}_prev")
+			vertex.EdgeToVertex(subgraph._verticesWithID[id(token.NextToken)], edgeID=f"n{tokenID}_next")
+		tokenIterator = token.GetIterator()
+		lastToken = next(tokenIterator)
+		lastVertex = subgraph._verticesWithID[id(lastToken)]
+		lastVertex.EdgeToVertex(subgraph._verticesWithID[id(lastToken.PreviousToken)], edgeID=f"n{tokenID + 1}_prev")
+
+	def WriteDocument(self, path):
+		graphMLDocument = GraphMLDocument("Streams")
+		graphMLDocument.FromGraph(self._graph)
+		graphMLDocument.WriteToFile(path)
diff --git a/pyVHDLParser/CLI/Token.py b/pyVHDLParser/CLI/Token.py
index 94ad73231..e35873771 100644
--- a/pyVHDLParser/CLI/Token.py
+++ b/pyVHDLParser/CLI/Token.py
@@ -28,16 +28,18 @@
 # ==================================================================================================================== #
 #
 from pathlib import Path
-from textwrap import dedent
 
 from pyAttributes.ArgParseAttributes import CommandAttribute
+from pyTooling.Graph import Graph, Vertex, Subgraph
+from pyTooling.Graph.GraphML import GraphMLDocument
 
-from ..Base import ParserException
-from ..Token import StartOfDocumentToken, EndOfDocumentToken, CharacterToken, SpaceToken, WordToken, LinebreakToken, CommentToken, IndentationToken
-from ..Token import CharacterTranslation, SingleLineCommentToken
-from ..Token.Parser import Tokenizer
+from pyVHDLParser.Base import ParserException
+from pyVHDLParser.CLI.GraphML import GraphML
+from pyVHDLParser.Token import StartOfDocumentToken, EndOfDocumentToken, CharacterToken, SpaceToken, WordToken, LinebreakToken, CommentToken, IndentationToken
+from pyVHDLParser.Token import CharacterTranslation, SingleLineCommentToken
+from pyVHDLParser.Token.Parser import Tokenizer
 
-from . import FrontEndProtocol, FilenameAttribute, translate
+from pyVHDLParser.CLI import FrontEndProtocol, FilenameAttribute, translate
 
 
 class TokenStreamHandlers:
@@ -94,75 +96,9 @@ def HandleTokenize(self: FrontEndProtocol, args):
 		except NotImplementedError as ex:
 			print("{RED}NotImplementedError: {0!s}{NOCOLOR}".format(ex, **self.Foreground))
 
-		nodeFormat="t_{line}_{id}"
-		nodeID = 0
-		line = 0
-		node = nodeFormat.format(line=line, id=nodeID)
-		graphvizBuffer = dedent("""\
-			digraph TokenStream {{
-			graph [rankdir=LR splines=ortho]
-			node [shape=record];
-
-			{node} [style=filled, fillcolor=gold, label="{caption}|{{None|None|Next}}"];
-			""").format(
-			node=node,
-			caption=firstToken.__class__.__qualname__
-		)
-		lline = 0
-		sameRanked = [node]
-		lineStarts = [node]
-
-		tokenIterator = firstToken.GetIterator(inclusiveStopToken=False)
-		for token in tokenIterator:
-			nodeID += 1
-			nnode=nodeFormat.format(line=line, id=nodeID)
-			graphvizBuffer += dedent("""\
-				{lnode} -> {node};
-				{node} [style=filled, fillcolor={color}, label="{caption}|{{Prev|{content}|Next}}"];
-				""").format(
-				node=nnode,
-				lnode=node,
-				color=translate(token),
-				caption=token.__class__.__qualname__,
-				content=CharacterTranslation(str(token))
-			)
-			node = nnode
-			if len(sameRanked) == 0:
-				lineStarts.append(node)
-			sameRanked.append(node)
-
-			if isinstance(token, (LinebreakToken, SingleLineCommentToken)):
-				# graphvizBuffer += dedent("""\
-				#
-				# {{ rank=same {nodes} }}
-				#
-				# """).format(nodes=" ".join(sameRanked))
-
-				sameRanked = []
-				line += 1
-			else:
-				lline = line
-
-		tokenIterator = token.GetIterator()
-		lastToken = next(tokenIterator)
-
-		graphvizBuffer += dedent("""\
-			t_{lline}_{lid} -> t_{line}_00;
-			t_{line}_00 [style=filled, fillcolor=gold, label="{caption}|{{Prev|None|None}}"];
-
-			{{ rank=same {nodes} }}
-			}}
-			""").format(
-			line=line,
-			lline=lline,
-			lid=nodeID - 1,
-			caption=lastToken.__class__.__qualname__,
-			nodes=" ".join(lineStarts)
-		)
-
-		gvFile = file.with_suffix('.gv')
-		with gvFile.open('w') as fileHandle:
-			fileHandle.write(graphvizBuffer)
+		exporter = GraphML()
+		exporter.AddTokenStream(firstToken)
+		exporter.WriteDocument(Path.cwd() / "temp/TokenStream.graphml")
 
 		self.exit()
diff --git a/pyVHDLParser/CLI/VHDLParser.py b/pyVHDLParser/CLI/VHDLParser.py
index ed9c0a84d..854e4bc53 100644
--- a/pyVHDLParser/CLI/VHDLParser.py
+++ b/pyVHDLParser/CLI/VHDLParser.py
@@ -69,8 +69,8 @@ class Application(LineTerminal, ArgParseMixin, TokenStreamHandlers, BlockStreamH
 	# TODO: use pyTooling Platform
 	__PLATFORM = platform_system()
 
-	def __init__(self, debug=False, verbose=False, quiet=False, sphinx=False):
-		super().__init__(verbose, debug, quiet)
+	def __init__(self):
+		super().__init__()
 
 		# Late-initialize Block classes
 		# --------------------------------------------------------------------------
@@ -104,11 +104,6 @@ def __init__(self, *args, **kwargs):
 			add_help=False
 		)
 
-		# If executed in Sphinx to auto-document CLI arguments, exit now
-		# --------------------------------------------------------------------------
-		if sphinx:
-			return
-
 		# Change error and warning reporting
 		# --------------------------------------------------------------------------
 		self._LOG_MESSAGE_FORMAT__[Severity.Fatal] = "{DARK_RED}[FATAL] {message}{NOCOLOR}"
@@ -199,13 +194,14 @@ def main(): # mccabe:disable=MC0001
 	"""
 	from sys import argv as sys_argv
 
-	debug = "-d" in sys_argv
-	verbose = "-v" in sys_argv
-	quiet = "-q" in sys_argv
-
 	try:
 		# handover to a class instance
 		app = Application()  # debug, verbose, quiet)
+		app.Configure(
+			verbose="-v" in sys_argv,
+			debug="-d" in sys_argv,
+			quiet="-q" in sys_argv
+		)
 		app.Run()
 		app.exit()
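
The new exporter can also be driven outside the CLI handlers, which is handy when debugging the linked token list interactively. The sketch below mirrors what HandleTokenize does after this patch; it is not part of the patch itself, it assumes the patch is applied and pyTooling is installed, and the VHDL snippet as well as the output path are illustrative placeholders only.

    # Hedged usage sketch (not part of the patch): drive the new GraphML
    # exporter directly, mirroring HandleTokenize after this change.
    from pathlib import Path

    from pyVHDLParser.CLI.GraphML import GraphML      # class added by this patch
    from pyVHDLParser.Token.Parser import Tokenizer

    code = "entity e is\nend entity;\n"               # illustrative VHDL input
    tokenStream = Tokenizer.GetVHDLTokenizer(code)

    tokenIterator = iter(tokenStream)
    firstToken = next(tokenIterator)                  # StartOfDocumentToken
    for _ in tokenIterator:                           # drain the generator so the
        pass                                          # prev/next token links are complete

    exporter = GraphML()
    exporter.AddTokenStream(firstToken)               # one vertex per token plus prev/next edges
    exporter.WriteDocument(Path.cwd() / "TokenStream.graphml")

As in the reworked HandleBlockStreaming above, the stream is drained first so that every token is linked to its neighbours before AddTokenStream walks the list.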