diff --git a/+contrib/+tdt/TDTbin2mat.m b/+contrib/+tdt/TDTbin2mat.m
index 443ba476..d8bfc3dc 100644
--- a/+contrib/+tdt/TDTbin2mat.m
+++ b/+contrib/+tdt/TDTbin2mat.m
@@ -253,7 +253,7 @@
 % the selected block. Each file starts with a 1024 byte boolean channel
 % map indicating which channel's sort codes have been saved in the file.
 % Following this map, is a sort code field that maps 1:1 with the event
-% ID for a given block. The event ID is essentially the Nth occurance of
+% ID for a given block. The event ID is essentially the Nth occurrence of
 % an event on the entire TSQ file.

 % look for the exact one
diff --git a/+io/createParsedType.m b/+io/createParsedType.m
index 2454a750..ae50d915 100644
--- a/+io/createParsedType.m
+++ b/+io/createParsedType.m
@@ -29,7 +29,7 @@
     [warningMessage, warningID] = lastwarn();

-    % Handle any warnings if they occured.
+    % Handle any warnings if they occurred.
     if ~isempty(warningMessage)
         if strcmp( warningID, 'NWB:CheckUnset:InvalidProperties' )
diff --git a/+io/mapData2H5.m b/+io/mapData2H5.m
index f318d1d2..363c0e22 100644
--- a/+io/mapData2H5.m
+++ b/+io/mapData2H5.m
@@ -56,7 +56,7 @@
 %% Do Data Conversions
 switch class(data)
     case {'types.untyped.RegionView' 'types.untyped.ObjectView'}
-        %will throw errors if refdata DNE. Caught at NWBData level.
+        %will throw errors if refdata DNE (does not exist). Caught at NWBData level.
         data = io.getRefData(fid, data);
     case 'logical' % encode as int8 values.
diff --git a/+misc/parseSkipInvalidName.m b/+misc/parseSkipInvalidName.m
index 961118c0..63cfe304 100644
--- a/+misc/parseSkipInvalidName.m
+++ b/+misc/parseSkipInvalidName.m
@@ -1,5 +1,5 @@
 function parseSkipInvalidName(parser, keywordArguments)
-%PARSESKIPINVALIDNAME as parse() but without constraing on valid property names.
+%PARSESKIPINVALIDNAME as parse() but without constraining to valid property names.
 validArgFlags = false(size(keywordArguments));
 for i = 1:2:length(keywordArguments)
diff --git a/+misc/str2validName.m b/+misc/str2validName.m
index fa897c51..126e6c8c 100644
--- a/+misc/str2validName.m
+++ b/+misc/str2validName.m
@@ -1,7 +1,7 @@
 function valid = str2validName(propname, prefix)
 % STR2VALIDNAME
 % Converts the property name into a valid matlab property name.
-% propname: the offending propery name
+% propname: the offending property name
 % prefix: optional prefix to use instead of the ambiguous "dyn"
 if ~iscell(propname) && isvarname(propname)
     valid = propname;
diff --git a/+tests/+unit/PynwbTutorialTest.m b/+tests/+unit/PynwbTutorialTest.m
index 946661be..6f687c15 100644
--- a/+tests/+unit/PynwbTutorialTest.m
+++ b/+tests/+unit/PynwbTutorialTest.m
@@ -25,7 +25,7 @@
         'object_id.py', ... % Does not export nwb file
         'plot_configurator.py', ... % Does not export nwb file
         'brain_observatory.py', ... % Requires allen sdk
-        'extensions.py'}; % Discrepency between tutorial and schema: https://github.com/NeurodataWithoutBorders/pynwb/issues/1952
+        'extensions.py'}; % Discrepancy between tutorial and schema: https://github.com/NeurodataWithoutBorders/pynwb/issues/1952

     % SkippedFiles - Name of exported nwb files to skip reading with matnwb
     SkippedFiles = {'family_nwb_file_0.nwb'} % requires family driver from h5py
@@ -208,7 +208,7 @@ function installPythonDependencies(testCase)

 function pynwbFolder = downloadPynwb()
     githubUrl = 'https://github.com/NeurodataWithoutBorders/pynwb/archive/refs/heads/master.zip';
-    pynwbFolder = downloadZippedGithubRepo(githubUrl, '.'); % Download in current direcory
+    pynwbFolder = downloadZippedGithubRepo(githubUrl, '.'); % Download in current directory
 end

 function repoFolder = downloadZippedGithubRepo(githubUrl, targetFolder)
diff --git a/+tests/+util/verifyContainerEqual.m b/+tests/+util/verifyContainerEqual.m
index 7d2d3257..0283ab54 100644
--- a/+tests/+util/verifyContainerEqual.m
+++ b/+tests/+util/verifyContainerEqual.m
@@ -35,7 +35,7 @@ function verifyContainerEqual(testCase, actual, expected, ignoreList)
         tests.util.verifyContainerEqual(testCase, actualValue.value, expectedValue.value);
     elseif isdatetime(expectedValue)...
             || (iscell(expectedValue) && all(cellfun('isclass', expectedValue, 'datetime')))
-        % linux MATLAB doesn't appear to propery compare datetimes whereas
+        % linux MATLAB doesn't appear to properly compare datetimes whereas
         % Windows MATLAB does. This is a workaround to get tests to work
         % while getting close enough to exact date representation.
         actualValue = types.util.checkDtype(prop, 'datetime', actualValue);
diff --git a/+types/+untyped/@DataStub/DataStub.m b/+types/+untyped/@DataStub/DataStub.m
index 3571d803..7b9de872 100644
--- a/+types/+untyped/@DataStub/DataStub.m
+++ b/+types/+untyped/@DataStub/DataStub.m
@@ -118,7 +118,7 @@
             elseif length(varargin) == 1
                 % note: you cannot leverage subsref here because when
                 % load() is called, it's calling the builtin version of
-                % subsref, which apparantly poisons all calls in load() to
+                % subsref, which apparently poisons all calls in load() to
                 % use builtin subsref. We use the internal load_mat_style
                 % to workaround this.
                 data = obj.load_mat_style(varargin{1});
diff --git a/+types/+untyped/DataPipe.m b/+types/+untyped/DataPipe.m
index dcc41f52..fc02a258 100644
--- a/+types/+untyped/DataPipe.m
+++ b/+types/+untyped/DataPipe.m
@@ -11,7 +11,7 @@
 % DATAPIPE(..., 'maxSize', MAXSIZE) Sets the maximum size of the HDF5
 % Dataset. To append data later, use the MAXSIZE of the full
 % dataset. Inf on any axis will allow the Dataset to grow without
-% limit in that dimension. If not provided, MAXSIZE is infered from
+% limit in that dimension. If not provided, MAXSIZE is inferred from
 % the DATA. An error is thrown if neither MAXSIZE nor DATA is provided.
 %
 % DATAPIPE(..., 'axis', AXIS) Set which dimension axis to increment when
@@ -114,7 +114,7 @@
             warning('NWB:DataPipe:UnusedArguments',...
                 ['Other keyword arguments were added along with a valid '...
                 'filename and path. Since the filename and path are valid, the '...
-                'following extra properties will be superceded by the '...
+                'following extra properties will be superseded by the '...
                 'configuration on file:\n%s'],...
                 strjoin(formatted, newline));
         end
diff --git a/+util/loadEventAlignedSpikeTimes.m b/+util/loadEventAlignedSpikeTimes.m
index 64af9a02..e02c4b60 100644
--- a/+util/loadEventAlignedSpikeTimes.m
+++ b/+util/loadEventAlignedSpikeTimes.m
@@ -4,7 +4,7 @@
 % ST = LOADEVENTALIGNEDTIMESERIESDATA(NWB, UNIT_ID, EVENT_TIMES) returns
 % a cell array containing the spike times relative to the timestamps contained
 % in the EVENT_TIMES array. Optional arguments control the size of the
-% temporal widnow within which spike times are included.
+% temporal window within which spike times are included.
 % OPTIONAL KEYWORD ARGUMENTS
 % 'before_time' - specifies the time, in seconds, before the event for
 % the inclusion of spike times. Defaults to 1.
diff --git a/.codespellrc b/.codespellrc
new file mode 100644
index 00000000..7120bfee
--- /dev/null
+++ b/.codespellrc
@@ -0,0 +1,3 @@
+[codespell]
+skip = *.html,*logo_matnwb.svg,*fastsearch.m,*.yaml,*UpdateThirdPartyFromUpstream.sh,*testResults.xml
+ignore-words-list = DNE,nd,whos
diff --git a/README.md b/README.md
index 314c1868..7f31926e 100644
--- a/README.md
+++ b/README.md
@@ -155,7 +155,7 @@ NWB files use the HDF5 format to store data. There are two main differences betw
 The NWB schema has regular updates and is open to addition of new types along with modification of previously defined types. As such, certain type presumptions made by MatNWB may be invalidated in the future from a NWB schema. Furthermore, new types may require implementations that will be missing in MatNWB until patched in. For those planning on using matnwb alongside pynwb, please keep the following in mind:

- - MatNWB is dependent on the schema, which may not necessary correspond with your PyNWB schema version. Please consider overwriting the contents within MatNWB's **~/schema/core** directory with the generating PyNWB's **src/pynwb/data directory** and running generateCore to ensure compatibilty between systems.
+ - MatNWB is dependent on the schema, which may not necessarily correspond with your PyNWB schema version. Please consider overwriting the contents within MatNWB's **~/schema/core** directory with the generating PyNWB's **src/pynwb/data directory** and running generateCore to ensure compatibility between systems.

 The `master` branch in this repository is considered perpetually unstable. If you desire Matnwb's full functionality (full round-trip with nwb data), please consider downloading the more stable releases in the Releases tab. Most releases will coincide with nwb-schema releases and guarantee compatibility of new features introduced with the schema release along with backwards compatibility with all previous nwb-schema releases.
diff --git a/tutorials/convertTrials.m b/tutorials/convertTrials.m
index 7d5b5b1b..dbb216c0 100644
--- a/tutorials/convertTrials.m
+++ b/tutorials/convertTrials.m
@@ -349,7 +349,7 @@
 % Though TimeIntervals is a subclass of the DynamicTable type, we opt for
 % populating the Dynamic Table data by column instead of using `addRow`
 % here because of how the data is formatted. DynamicTable is flexible
-% enough to accomodate both styles of data conversion.
+% enough to accommodate both styles of data conversion.
 trials_epoch = types.core.TimeIntervals(...
     'colnames', {'start_time'}, ...
     'description', 'trial data and properties', ...
@@ -393,7 +393,7 @@
 % ('unitx' where 'x' is some unit ID).

 %%
-% Trial IDs, wherever they are used, are placed in a relevent |control| property in the
+% Trial IDs, wherever they are used, are placed in a relevant |control| property in the
 % data object and will indicate what data is associated with what trial as
 % defined in |trials|'s |id| column.
diff --git a/tutorials/dataPipe.m b/tutorials/dataPipe.m
index 3f834248..714245d7 100644
--- a/tutorials/dataPipe.m
+++ b/tutorials/dataPipe.m
@@ -19,7 +19,7 @@
 DataPipe = types.untyped.DataPipe('data', DataToCompress);

 %%
-% This is the most basic way to acheive compression, and all of the
+% This is the most basic way to achieve compression, and all of the
 % optimization decisions are automatically determined by MatNWB.

 %% Background
 % HDF5 has built-in ability to compress and decompress individual datasets.
@@ -98,7 +98,7 @@
 % resulting file size of 1.1MB. The chunk size was chosen such that it
 % spans each individual row of the matrix.
 %
-% Use the combination of arugments that fit your need.
+% Use the combination of arguments that fits your needs.
 % When dealing with large datasets, you may want to use iterative write to
 % ensure that you stay within the bounds of your system memory and use
 % chunking and compression to optimize storage, read and write of the data.