Skip to content

Commit

Permalink
feat: update lems scripts to include version and commit information
Browse files Browse the repository at this point in the history
  • Loading branch information
sanjayankur31 committed Jun 18, 2024
1 parent 88aca25 commit a8eef09
Show file tree
Hide file tree
Showing 3 changed files with 164 additions and 69 deletions.
3 changes: 2 additions & 1 deletion scripts/lems/asttemplates.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@
---
{%- endif %}
Generated on {{ todays_date }}.
Schema against which LEMS based on these should be valid: [LEMS_v{{ lems_version }}.xsd](https://github.com/LEMS/LEMS/tree/{{ lems_branch }}/Schemas/LEMS/LEMS_v{{ lems_version }}.xsd).
Generated on {{ lems_date }} from [this](https://github.com/LEMS/LEMS/commit/{{ lems_commit }}) commit.
Please file any issues or questions at the [issue tracker here](https://github.com/LEMS/LEMS/issues).
---
Expand Down
3 changes: 3 additions & 0 deletions scripts/lems/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
lxml
xmltodict
jinja2
227 changes: 159 additions & 68 deletions scripts/lems/xml2md.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,11 @@
Copyright 2023 NeuroML contributors
"""

import tempfile
import subprocess
import re
import lxml
import lxml.etree as ET
import xmltodict
from datetime import date
import asttemplates
Expand All @@ -28,83 +33,169 @@
"components": ["Defining Components", ""]
}

# Date stamp used by the (pre-refactor) template rendering below
todays_date = date.today().strftime("%d/%m/%y")
# Branch and schema version of the LEMS repository the docs are generated against
lems_branch = "master"
lems_version = "0.7.6"
# Repository cloned when no local source directory is supplied to main()
GitHubRepo = "https://github.com/LEMS/LEMS.git"
# Generation date and commit hash embedded in the generated page headers;
# lems_commit is filled in from `git log` inside main()
lems_date = date.today().strftime("%d/%m/%y")
lems_commit = ""

# element types grouped by their '@section' attribute
parsed_data = {}

# input annotations file and output directory for the generated markdown pages
srcfile = "sourceannotations.xml"
destdir = "../../source/Userdocs/LEMS_elements/"

# NOTE(review): this top-level block duplicates logic that also appears inside
# main() below and ends abruptly after opening a tab-set — it looks like
# residue of a superseded version of this script; confirm whether it should
# be removed.

# populate our page info: group element types by their '@section' attribute
with open(srcfile, 'r') as ast_doc:
    elementtypes = xmltodict.parse(ast_doc.read())['ElementTypes']['ElementType']
    for et in elementtypes:
        try:
            parsed_data[et['@section']].append(et)
        except KeyError:
            # first element type seen for this section
            parsed_data[et['@section']] = []
            parsed_data[et['@section']].append(et)

# add Include, which is not present in the source annotations file
parsed_data['root'].append(
    OrderedDict(
        {
            '@name': 'Include',
            'Info': 'Include LEMS files in other LEMS files. Files are included where the Include declaration occurs. The enclosing Lems block is stripped off and the rest of the content included as is',
            'Property': OrderedDict(
                {
                    '@name': 'file',
                    '@type': 'String',
                    '#text': 'the name or relative path of a file to be included'
                }
            )
        }
    )
)

print(parsed_data)

# render templates: one markdown page per section
for pg, pginfo in sections_pages.items():
    outputfile = "{}/{}.md".format(destdir, pginfo[0].replace(" ", ""))
    with open(outputfile, 'w') as ast_doc:
        print(
            asttemplates.page_header.render(section_data=pginfo, todays_date=todays_date),
            file=ast_doc)
        for et in parsed_data[pg]:
            print(f"Rendering {et['@name']}")
            print(
                asttemplates.elementtype.render(et=et),
                file=ast_doc)
            # open a MyST tab-set if there is anything to put in it
            if 'Property' in et or 'ListProperty' in et:
                print("""`````{tab-set}""", end="", file=ast_doc)

# lower-cased type name -> pretty-printed XSD fragment, populated by
# get_schema_doc() and read when rendering the per-element schema tabs
comp_type_schema = {}

def get_schema_doc(schemafile):
    """Extract per-type XSD snippets from the LEMS schema.

    Populates the module-level ``comp_type_schema`` dict, mapping each
    lower-cased simple/complex type name to its pretty-printed XSD source
    (with annotation/documentation children stripped from complex types).

    :param schemafile: path to the XSD schema file
    """
    parser = lxml.etree.XMLParser(remove_comments=True,
                                  remove_blank_text=True, ns_clean=True)
    try:
        tree = ET.parse(schemafile, parser=parser)
        root = tree.getroot()
    except ET.XMLSyntaxError as e:
        # bail out early: falling through would hit a NameError on the
        # undefined `root`, masking the real parse error
        print(f"Could not parse file {schemafile}: {e}")
        return
    namespaces = root.nsmap

    # currently unused
    for simple_type in root.findall("xs:simpleType", namespaces=namespaces):
        simple_type_str = ET.tostring(simple_type, pretty_print=True,
                                      encoding="unicode",
                                      xml_declaration=False)

        # needs to be lowerCamelCase to match XML core types
        type_name = simple_type.attrib['name'].lower().replace("nml2quantity_", "")
        comp_type_schema[type_name] = re.sub(r"Type.*name=", r"Type name=", simple_type_str)

    for complex_type in root.findall("xs:complexType", namespaces=namespaces):
        # iterate over a snapshot: removing children while iterating the
        # element itself skips the sibling following each removed node
        for node in list(complex_type):
            if "annotation" in str(node.tag) or "documentation" in str(node.tag):
                complex_type.remove(node)

        complex_type_str = ET.tostring(complex_type, pretty_print=True,
                                       encoding="unicode",
                                       xml_declaration=False)
        # needs to be lowerCamelCase to match XML core types
        type_name = complex_type.attrib['name'].lower()
        comp_type_schema[type_name] = re.sub(r"Type.*name=", r"Type name=", complex_type_str)


def main(srcdir, destdir):
    """Generate the LEMS element markdown pages from the source annotations.

    :param srcdir: path to a local checkout of the LEMS repository; if None
        or empty, a shallow clone of `lems_branch` is made into a temporary
        directory
    :param destdir: directory the generated markdown pages are written to
    """
    # If not defined or empty, download a new copy to a temporary directory
    if not srcdir:
        print("No src directory specified. Cloning LEMS repo")
        tempdir = tempfile.TemporaryDirectory()
        tmpsrcdir = tempdir.name
        print("Temporary directory: {}".format(tmpsrcdir))
        clone_command = ["git", "clone", "--depth", "1", "--branch",
                         lems_branch, GitHubRepo, tmpsrcdir]
        # fail loudly here rather than with a confusing error further down
        subprocess.run(clone_command, check=True)
    else:
        tmpsrcdir = srcdir

    # TODO: add LEMS examples
    # exampledirs = [tmpsrcdir + "/examples/", tmpsrcdir + "/LEMSexamples/"]
    exampledirs = [tmpsrcdir + "/examples/"]
    xsdsrc = tmpsrcdir + f"/Schemas/LEMS/LEMS_v{lems_version}.xsd"

    # Get current commit hash so generated pages can link to it
    commit_command = ["git", "log", "-1", "--pretty=format:%H"]
    output = subprocess.run(commit_command, capture_output=True,
                            cwd=tmpsrcdir, text=True)
    lems_commit = output.stdout

    # parse the schema: populates comp_type_schema
    get_schema_doc(xsdsrc)

    # populate our page info: group element types by their '@section'
    with open(srcfile, 'r') as ast_doc:
        elementtypes = xmltodict.parse(ast_doc.read())['ElementTypes']['ElementType']
        for et in elementtypes:
            try:
                parsed_data[et['@section']].append(et)
            except KeyError:
                # first element type seen for this section
                parsed_data[et['@section']] = []
                parsed_data[et['@section']].append(et)

    # add Include, which is not present in the source annotations file
    parsed_data['root'].append(
        OrderedDict(
            {
                '@name': 'Include',
                'Info': 'Include LEMS files in other LEMS files. Files are included where the Include declaration occurs. The enclosing Lems block is stripped off and the rest of the content included as is',
                'Property': OrderedDict(
                    {
                        '@name': 'file',
                        '@type': 'String',
                        '#text': 'the name or relative path of a file to be included'
                    }
                )
            }
        )
    )

    # render templates: one markdown page per section
    for pg, pginfo in sections_pages.items():
        outputfile = "{}/{}.md".format(destdir, pginfo[0].replace(" ", ""))
        with open(outputfile, 'w') as ast_doc:
            print(
                asttemplates.page_header.render(section_data=pginfo,
                                                lems_date=lems_date,
                                                lems_commit=lems_commit,
                                                lems_version=lems_version,
                                                lems_branch=lems_branch),
                file=ast_doc)
            for et in parsed_data[pg]:
                print(f"Rendering {et['@name']}")
                print(
                    asttemplates.elementtype.render(et=et),
                    file=ast_doc)
                # open a MyST tab-set if there is anything to put in it
                if 'Property' in et or 'ListProperty' in et:
                    print("""`````{tab-set}""", end="", file=ast_doc)

                try:
                    # xmltodict yields a dict for a single child, a list for
                    # several: normalise to a list
                    if isinstance(et['Property'], list):
                        props = et['Property']
                    else:
                        props = [et['Property']]
                    print(f" - {len(props)} properties: {props}")
                    print(
                        asttemplates.prop.render(props=props),
                        file=ast_doc)
                except KeyError:
                    pass

                try:
                    if isinstance(et['ListProperty'], list):
                        lprops = et['ListProperty']
                    else:
                        lprops = [et['ListProperty']]
                    print(f" - {len(lprops)} properties: {lprops}")
                    print(
                        asttemplates.listprop.render(lprops=lprops),
                        file=ast_doc)
                except KeyError:
                    pass

                # close the tab-set opened above
                if 'Property' in et or 'ListProperty' in et:
                    print("""`````""", end="", file=ast_doc)


# print(parsed_data)

if __name__ == "__main__":
    # Point src at a local checkout to skip the clone, e.g.:
    # src = "/home/asinha/Documents/02_Code/00_mine/NeuroML/software/NeuroML2/"
    # src = None means main() will shallow-clone the LEMS repo itself
    src = None
    destdir = "../../source/Userdocs/LEMS_elements/"
    main(src, destdir)

0 comments on commit a8eef09

Please sign in to comment.