Merge pull request #13 from catalystneuro/tutorial

Add tutorial

alessandratrapani authored May 10, 2024
2 parents bfb1ab3 + 54213d1 commit 4c9aff7
Showing 5 changed files with 534 additions and 41 deletions.
@@ -0,0 +1,25 @@
"""Primary script to run to convert an entire session for of data using the NWBConverter."""

from pathlib import Path
import os
from benisty_2022_convert_session import session_to_nwb



# Parameters for conversion
root_path = Path("/media/amtra/Samsung_T5/CN_data")
data_dir_path = root_path / "Higley-CN-data-share"
output_dir_path = root_path / "Higley-conversion_nwb/"

session_ids = os.listdir(data_dir_path)
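# stub_test=True converts only a small chunk of each data stream, for quick test runs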
stub_test = True
for session_id in session_ids:
session_folder = data_dir_path / session_id
if session_folder.is_dir():
session_to_nwb(
folder_path=session_folder,
output_dir_path=output_dir_path,
session_id=session_id,
stub_test=stub_test,
)
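
For orientation, a minimal sketch of the on-disk layout the batch loop above assumes; the session folder name follows the session IDs used elsewhere in this commit, and the file names inside it are illustrative, since the conversion locates files by glob prefix:

# CN_data/
#     Higley-CN-data-share/
#         11222019_grabAM05_spont/                   # one folder per session_id
#             11222019_grabAM05_spont_spike2.smrx    # Spike2 analog/TTL signals
#             11222019_grabAM05_spont.avi            # behavioral video
#             11222019_grabAM05_spont_proc.mat       # Facemap output
#     Higley-conversion_nwb/                         # output: one <session_id>.nwb per session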
34 changes: 20 additions & 14 deletions src/higley_lab_to_nwb/benisty_2022/benisty_2022_convert_session.py
@@ -7,26 +7,28 @@
from higley_lab_to_nwb.benisty_2022.benisty_2022_spike2signals_interface import get_streams
from higley_lab_to_nwb.benisty_2022.benisty_2022_utils import create_tiff_stack, read_session_start_time
import os
import glob


def session_to_nwb(data_dir_path: Union[str, Path], output_dir_path: Union[str, Path], stub_test: bool = False):
def session_to_nwb(
folder_path: Union[str, Path], output_dir_path: Union[str, Path], session_id: str, stub_test: bool = False
):

data_dir_path = Path(data_dir_path)
output_dir_path = Path(output_dir_path)
if stub_test:
output_dir_path = output_dir_path / "nwb_stub"
output_dir_path.mkdir(parents=True, exist_ok=True)

session_id = "11222019_grabAM05_spont"
nwbfile_path = output_dir_path / f"{session_id}.nwb"

source_data = dict()
conversion_options = dict()

folder_path = data_dir_path / session_id
search_pattern = "_".join(session_id.split("_")[:2])
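# e.g. session_id "11222019_grabAM05_spont" -> search_pattern "11222019_grabAM05",
# so the globs below match all session files sharing that date/animal prefix.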

# Add Analog signals from Spike2
file_path = str(folder_path / f"{session_id}_spike2.smrx")
smrx_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*.smrx"))
file_path = smrx_files[0]
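# If the glob matches more than one .smrx file, only the first match is converted.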
stream_ids, stream_names = get_streams(file_path=file_path)

TTLsignals_name_map = {
@@ -67,19 +67,19 @@ def session_to_nwb(data_dir_path: Union[str, Path], output_dir_path: Union[str,
)

conversion_options.update(dict(Spike2Signals=dict(stub_test=stub_test)))

# Add Imaging
sampling_frequency = 10.0
photon_series_index = 0

excitation_type_to_start_frame_index_mapping = dict(Blue=0, Violet=1, Green=2)
channel_to_frame_side_mapping = dict(Green="left", Red="right")
channel_to_frame_side_mapping = dict(Green="right", Red="left")

for excitation_type in excitation_type_to_start_frame_index_mapping:
for channel in channel_to_frame_side_mapping:
start_frame_index = excitation_type_to_start_frame_index_mapping[excitation_type]
frame_side = channel_to_frame_side_mapping[channel]
tif_file_path = str(folder_path) + f"_channel{start_frame_index}_{frame_side}.tiff"
tif_file_path = str(folder_path) + f"/{session_id}_channel{start_frame_index}_{frame_side}.tiff"
if not os.path.exists(tif_file_path):
create_tiff_stack(
folder_path=folder_path,
@@ -105,13 +107,14 @@ def session_to_nwb(data_dir_path: Union[str, Path], output_dir_path: Union[str,
photon_series_index += 1

# Add Behavioral Video Recording
video_file_path = data_dir_path / session_id / f"{session_id}.avi"
avi_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*.avi"))
video_file_path = avi_files[0]
source_data.update(dict(Video=dict(file_paths=[video_file_path], verbose=False)))
conversion_options.update(dict(Video=dict(stub_test=stub_test, external_mode=False)))

# Add Facemap output
video_file_path = data_dir_path / session_id / f"{session_id}.avi"
mat_file_path = data_dir_path / session_id / f"{session_id}_proc.mat"
mat_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*_proc.mat"))
mat_file_path = mat_files[0]
source_data.update(
dict(
FacemapInterface=dict(mat_file_path=str(mat_file_path), video_file_path=str(video_file_path), verbose=False)
@@ -146,9 +149,12 @@ def session_to_nwb(data_dir_path: Union[str, Path], output_dir_path: Union[str,
data_dir_path = root_path / "Higley-CN-data-share"
output_dir_path = root_path / "Higley-conversion_nwb/"
stub_test = True

session_id = "11222019_grabAM06_vis_stim"
folder_path = data_dir_path / Path(session_id)
session_to_nwb(
data_dir_path=data_dir_path,
folder_path=folder_path,
output_dir_path=output_dir_path,
session_id=session_id,
stub_test=stub_test,
)
@@ -5,10 +5,11 @@
from neuroconv import BaseDataInterface
from neuroconv.tools import get_package
from neuroconv.utils import FilePathType
from neuroconv.tools.signal_processing import get_rising_frames_from_ttl
from neuroconv.tools.signal_processing import get_rising_frames_from_ttl, get_falling_frames_from_ttl
from spikeinterface.extractors import CedRecordingExtractor
from pynwb import NWBFile, TimeSeries
from ndx_events import TtlsTable, TtlTypesTable, EventsTable, EventTypesTable
from pynwb.epoch import TimeIntervals
from ndx_events import TtlsTable, TtlTypesTable


def _test_sonpy_installation() -> None:
@@ -93,11 +94,15 @@ def get_metadata(self) -> dict:

return metadata

def get_event_times_from_ttl(self, stream_id):
def get_event_times_from_ttl(self, stream_id, rising: bool = True):
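"""Return the timestamps of TTL edges in the given stream: rising edges by default, falling edges if rising=False."""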
extractor = CedRecordingExtractor(file_path=str(self.source_data["file_path"]), stream_id=stream_id)
times = extractor.get_times()
traces = extractor.get_traces()
event_times = get_rising_frames_from_ttl(traces)
if rising:
event_times = get_rising_frames_from_ttl(traces)
else:
event_times = get_falling_frames_from_ttl(traces)
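# Both helpers return sample indices of the detected edges; indexing into the
# extractor's timestamps below converts them to times in seconds.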

return times[event_times]

def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = False) -> None:
@@ -126,31 +131,22 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = Fal

if self.stimulus_stream_ids_to_names_map is not None:

stimulus_types_table = EventTypesTable(
name="StimulusTypesTable",
description="Contains the type of stimulus signals from Spike2 output.",
)
stimuli_table = EventsTable(
name="StimuliTable",
description="Contains the stimulus signals onset times.",
target_tables={"event_type": stimulus_types_table},
)

for stimulus_type, (stream_id, stream_name) in enumerate(self.stimulus_stream_ids_to_names_map.items()):
timestamps = self.get_event_times_from_ttl(stream_id=stream_id)
stimulus_types_table.add_row(
event_name=stream_name,
event_type_description=f"The onset times of the {stream_name} event.",
)
if len(timestamps):
for timestamp in timestamps[:end_frame]:
stimuli_table.add_row(
event_type=stimulus_type,
timestamp=timestamp,
for stream_id, stream_name in self.stimulus_stream_ids_to_names_map.items():
intervals_table = TimeIntervals(
name=stream_name,
description=f"Intervals for each {stream_name}",
)
start_times = self.get_event_times_from_ttl(stream_id=stream_id)
stop_times = self.get_event_times_from_ttl(stream_id=stream_id, rising=False)

if len(start_times):
for start, stop in zip(start_times[:end_frame], stop_times[:end_frame]):
intervals_table.add_row(
start_time=start,
stop_time=stop,
)

nwbfile.add_acquisition(stimulus_types_table)
nwbfile.add_acquisition(stimuli_table)
nwbfile.add_time_intervals(intervals_table)
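# add_time_intervals stores each table under nwbfile.intervals[stream_name], from which
# it can later be read back, e.g. with nwbfile.intervals[stream_name].to_dataframe().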

for stream_id, stream_name in self.behavioral_stream_ids_to_names_map.items():
extractor = CedRecordingExtractor(file_path=str(self.source_data["file_path"]), stream_id=stream_id)
