Skip to content

Commit

Permalink
Merge pull request #7 from catalystneuro/spikegadgets_tools
Browse files Browse the repository at this point in the history
reorganized spikegadgets tools into a module
  • Loading branch information
pauladkisson authored Dec 2, 2024
2 parents fc2661c + c986e80 commit 5539005
Show file tree
Hide file tree
Showing 4 changed files with 94 additions and 172 deletions.
89 changes: 3 additions & 86 deletions src/jadhav_lab_to_nwb/olson_2024/olson_2024_behavior_interface.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
"""Primary class for converting experiment-specific behavior."""
from pynwb.file import NWBFile
from pydantic import FilePath, DirectoryPath
from pydantic import DirectoryPath
import numpy as np
import re
from pathlib import Path

from neuroconv.basedatainterface import BaseDataInterface
from neuroconv.tools import nwb_helpers
from neuroconv.utils import get_base_schema
from ndx_events import Events

from .tools.spikegadgets import readTrodesExtractedDataFile


class Olson2024BehaviorInterface(BaseDataInterface):
"""Behavior interface for olson_2024 conversion"""
Expand Down Expand Up @@ -63,87 +64,3 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict):
timestamps=timestamps,
)
behavior_module.add(event)


def readTrodesExtractedDataFile(filename: FilePath) -> dict:
"""Read Trodes Extracted Data File (.dat) and return as a dictionary.
Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html
Parameters
----------
filename : FilePath
Path to the .dat file to read.
Returns
-------
dict
The contents of the .dat file as a dictionary
"""
with open(filename, "rb") as f:
# Check if first line is start of settings block
if f.readline().decode("ascii").strip() != "<Start settings>":
raise Exception("Settings format not supported")
fields = True
fieldsText = {}
for line in f:
# Read through block of settings
if fields:
line = line.decode("ascii").strip()
# filling in fields dict
if line != "<End settings>":
vals = line.split(": ")
fieldsText.update({vals[0].lower(): vals[1]})
# End of settings block, signal end of fields
else:
fields = False
dt = parseFields(fieldsText["fields"])
fieldsText["data"] = np.zeros([1], dtype=dt)
break
# Reads rest of file at once, using dtype format generated by parseFields()
dt = parseFields(fieldsText["fields"])
data = np.fromfile(f, dt)
fieldsText.update({"data": data})
return fieldsText


def parseFields(fieldstr: str) -> np.dtype:
"""Parse the fields string from a Trodes Extracted Data File and return as a numpy dtype.
Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html
Parameters
----------
fieldstr : str
The fields string from a Trodes Extracted Data File.
Returns
-------
np.dtype
The fields string as a numpy dtype.
"""
# Returns np.dtype from field string
sep = re.split("\s", re.sub(r"\>\<|\>|\<", " ", fieldstr).strip())
# print(sep)
typearr = []
# Every two elmts is fieldname followed by datatype
for i in range(0, sep.__len__(), 2):
fieldname = sep[i]
repeats = 1
ftype = "uint32"
# Finds if a <num>* is included in datatype
if sep[i + 1].__contains__("*"):
temptypes = re.split("\*", sep[i + 1])
# Results in the correct assignment, whether str is num*dtype or dtype*num
ftype = temptypes[temptypes[0].isdigit()]
repeats = int(temptypes[temptypes[1].isdigit()])
else:
ftype = sep[i + 1]
try:
fieldtype = getattr(np, ftype)
except AttributeError:
print(ftype + " is not a valid field type.\n")
exit(1)
else:
typearr.append((str(fieldname), fieldtype, repeats))
return np.dtype(typearr)
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
"""Primary class for converting SpikeGadgets LFP data."""
from pynwb.file import NWBFile
from pathlib import Path
from pydantic import FilePath, DirectoryPath
from pydantic import DirectoryPath
import numpy as np
import re

from pynwb.ecephys import ElectricalSeries, LFP
from neuroconv import BaseDataInterface
from neuroconv.tools import nwb_helpers
from neuroconv.utils import get_base_schema

from .tools.spikegadgets import readTrodesExtractedDataFile


class Olson2024SpikeGadgetsLFPInterface(BaseDataInterface):
"""SpikeGadgets LFP interface for olson_2024 conversion"""
Expand Down Expand Up @@ -92,87 +93,3 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = Fal
description="Processed extracellular electrophysiology data.",
)
ecephys_module.add(lfp)


def readTrodesExtractedDataFile(filename: FilePath) -> dict:
"""Read Trodes Extracted Data File (.dat) and return as a dictionary.
Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html
Parameters
----------
filename : FilePath
Path to the .dat file to read.
Returns
-------
dict
The contents of the .dat file as a dictionary
"""
with open(filename, "rb") as f:
# Check if first line is start of settings block
if f.readline().decode("ascii").strip() != "<Start settings>":
raise Exception("Settings format not supported")
fields = True
fieldsText = {}
for line in f:
# Read through block of settings
if fields:
line = line.decode("ascii").strip()
# filling in fields dict
if line != "<End settings>":
vals = line.split(": ")
fieldsText.update({vals[0].lower(): vals[1]})
# End of settings block, signal end of fields
else:
fields = False
dt = parseFields(fieldsText["fields"])
fieldsText["data"] = np.zeros([1], dtype=dt)
break
# Reads rest of file at once, using dtype format generated by parseFields()
dt = parseFields(fieldsText["fields"])
data = np.fromfile(f, dt)
fieldsText.update({"data": data})
return fieldsText


def parseFields(fieldstr: str) -> np.dtype:
"""Parse the fields string from a Trodes Extracted Data File and return as a numpy dtype.
Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html
Parameters
----------
fieldstr : str
The fields string from a Trodes Extracted Data File.
Returns
-------
np.dtype
The fields string as a numpy dtype.
"""
# Returns np.dtype from field string
sep = re.split("\s", re.sub(r"\>\<|\>|\<", " ", fieldstr).strip())
# print(sep)
typearr = []
# Every two elmts is fieldname followed by datatype
for i in range(0, sep.__len__(), 2):
fieldname = sep[i]
repeats = 1
ftype = "uint32"
# Finds if a <num>* is included in datatype
if sep[i + 1].__contains__("*"):
temptypes = re.split("\*", sep[i + 1])
# Results in the correct assignment, whether str is num*dtype or dtype*num
ftype = temptypes[temptypes[0].isdigit()]
repeats = int(temptypes[temptypes[1].isdigit()])
else:
ftype = sep[i + 1]
try:
fieldtype = getattr(np, ftype)
except AttributeError:
print(ftype + " is not a valid field type.\n")
exit(1)
else:
typearr.append((str(fieldname), fieldtype, repeats))
return np.dtype(typearr)
Empty file.
88 changes: 88 additions & 0 deletions src/jadhav_lab_to_nwb/olson_2024/tools/spikegadgets.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
"""Useful tools for dealing with spikegadgets data."""
from pydantic import FilePath
import numpy as np
import re


def readTrodesExtractedDataFile(filename: FilePath) -> dict:
    """Read Trodes Extracted Data File (.dat) and return as a dictionary.

    Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html

    Parameters
    ----------
    filename : FilePath
        Path to the .dat file to read.

    Returns
    -------
    dict
        The settings of the .dat file (lower-cased keys mapped to their string
        values), plus a "data" key holding the binary payload as a structured
        numpy array whose dtype is described by the "fields" setting.

    Raises
    ------
    ValueError
        If the file does not begin with a "<Start settings>" block.
    """
    with open(filename, "rb") as f:
        # The file must begin with an ASCII settings block.
        if f.readline().decode("ascii").strip() != "<Start settings>":
            raise ValueError("Settings format not supported")
        fieldsText = {}
        for line in f:
            line = line.decode("ascii").strip()
            if line == "<End settings>":
                break
            # Settings lines look like "Key: value". Split only on the first
            # ": " so values that themselves contain ": " are not truncated.
            key, _, value = line.partition(": ")
            fieldsText[key.lower()] = value
        # The binary payload follows immediately after the settings block;
        # parse its per-record layout once and read the rest of the file.
        dt = parseFields(fieldsText["fields"])
        fieldsText["data"] = np.fromfile(f, dt)
    return fieldsText


def parseFields(fieldstr: str) -> np.dtype:
    """Parse the fields string from a Trodes Extracted Data File and return as a numpy dtype.

    Adapted from https://docs.spikegadgets.com/en/latest/basic/ExportFunctions.html

    Parameters
    ----------
    fieldstr : str
        The fields string from a Trodes Extracted Data File, e.g.
        "<time uint32><data 2*int16>".

    Returns
    -------
    np.dtype
        Structured dtype with one entry per field; repeated fields become
        fixed-length subarrays.

    Raises
    ------
    ValueError
        If a field's datatype is not a valid numpy type name.
    """
    # Strip the angle-bracket markup, leaving "name type name type ..." tokens.
    sep = re.split(r"\s", re.sub(r"\>\<|\>|\<", " ", fieldstr).strip())
    typearr = []
    # Tokens come in (fieldname, datatype) pairs.
    for i in range(0, len(sep), 2):
        fieldname = sep[i]
        repeats = 1
        typestr = sep[i + 1]
        # A "*" in the datatype encodes a repeated field, written either as
        # "<num>*<dtype>" or "<dtype>*<num>".
        if "*" in typestr:
            temptypes = re.split(r"\*", typestr)
            # Boolean indexing picks the right element for either ordering.
            ftype = temptypes[temptypes[0].isdigit()]
            repeats = int(temptypes[temptypes[1].isdigit()])
        else:
            ftype = typestr
        try:
            fieldtype = getattr(np, ftype)
        except AttributeError:
            # Raise instead of print + exit(1): library code must not
            # terminate the interpreter.
            raise ValueError(f"{ftype} is not a valid field type.") from None
        if repeats == 1:
            # Avoid the deprecated (name, type, 1) scalar-field form.
            typearr.append((str(fieldname), fieldtype))
        else:
            typearr.append((str(fieldname), fieldtype, repeats))
    return np.dtype(typearr)

0 comments on commit 5539005

Please sign in to comment.