diff --git a/.gitignore b/.gitignore
index a13de5a1..68a90f5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
# For private use
-private_tests.ipynb
+private-tests.ipynb
notes.md
# Byte-compiled / optimized / DLL files
diff --git a/README.rst b/README.rst
index a90f5e84..cc6051f4 100644
--- a/README.rst
+++ b/README.rst
@@ -3,827 +3,72 @@ wfdb-python
|Build Status|
-.. figure:: https://raw.githubusercontent.com/MIT-LCP/wfdb-python/master/demoimg.png
+.. figure:: https://raw.githubusercontent.com/MIT-LCP/wfdb-python/master/demo-img.png
:alt: wfdb signals
-Introduction
-------------
-
-Native python scripts for reading and writing WFDB signals and annotations. Package to be expanded with other useful functionalities.
-
-Installation
+Introduction
------------
-The distribution is hosted on pypi and directly installable via pip without needing clone or download this repository. Note that the pypi package does not contain the demo scripts or the example data. To install the package from pypi, run from your terminal:
-``pip install wfdb``
-
-Download or clone this repository https://github.com/MIT-LCP/wfdb-python for the latest development version, the demo scripts, and the example data. To install the downloaded package, change directory into the base directory of the repository and run:
-``pip install .``
-
-
-Usage
------
-
-See the **demo.ipynb** file for example cases.
-
-Objects
-~~~~~~~
-
-As of version 1.0.0, wfdb records are stored in **Record** or **MultiRecord** objects, and annotations are stored in **Annotation** objects. To see all attributes of an object, call `object.__dict__`
-
-
-**Record** - The class representing WFDB headers, and single segment WFDB records.
-
-Record objects can be created using the constructor, by reading a WFDB header
-with 'rdheader', or a WFDB record (header and associated dat files) with rdsamp'
-or 'srdsamp'.
-
-The attributes of the Record object give information about the record as specified
-by https://www.physionet.org/physiotools/wag/header-5.htm
-
-In addition, the d_signals and p_signals attributes store the digital and physical
-signals of WFDB records with at least one channel.
-
-Contructor function:
-::
-
- def __init__(self, p_signals=None, d_signals=None,
- recordname=None, nsig=None,
- fs=None, counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- filename=None, fmt=None, sampsperframe=None,
- skew=None, byteoffset=None, adcgain=None,
- baseline=None, units=None, adcres=None,
- adczero=None, initvalue=None, checksum=None,
- blocksize=None, signame=None, comments=None)
-
-Example Usage:
-::
-
- import wfdb
- record1 = wfdb.Record(recordname='r1', fs=250, nsig=2, siglen=1000,
- filename=['r1.dat','r1.dat'])
-
-
-**MultiRecord** - The class representing multi-segment WFDB records.
-
-MultiRecord objects can be created using the constructor, or by reading a multi-segment
-WFDB record using 'rdsamp' with the 'm2s' (multi to single) input parameter set to False.
-
-The attributes of the MultiRecord object give information about the entire record as specified
-by https://www.physionet.org/physiotools/wag/header-5.htm
-
-In addition, the 'segments' parameter is a list of Record objects representing each
-individual segment, or 'None' representing empty segments, of the entire multi-segment record.
-
-Noteably, this class has no attribute representing the signals as a whole. The 'multi_to_single'
-instance method can be called on MultiRecord objects to return a single segment representation
-of the record as a Record object. The resulting Record object will have its 'p_signals' field set.
-
-Contructor function:
-::
-
- def __init__(self, segments = None, layout = None,
- recordname=None, nsig=None, fs=None,
- counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- segname = None, seglen = None, comments=None)
-
-Example Usage:
-::
-
- import wfdb
- recordM = wfdb.MultiRecord(recordname='rm', fs=50, nsig=8, siglen=9999,
- segname=['rm_1', '~', rm_2'],
- seglen=[800, 200, 900])
-
- recordL = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s = False)
- recordL = recordL.multi_to_single()
-
-
-**Annotation** - The class representing WFDB annotations.
-
-Annotation objects can be created using the constructor, or by reading a WFDB annotation
-file with 'rdann'.
-
-The attributes of the Annotation object give information about the annotation as specified
-by https://www.physionet.org/physiotools/wag/annot-5.htm:
-
-- ``recordname``: The base file name (without extension) of the record that the annotation is attached to.
-- ``extension``: The file extension of the file the annotation is stored in.
-- ``sample``: The annotation locations in samples relative to the beginning of the record.
-- ``symbol``: The annotation type according the the standard WFDB codes.
-- ``subtype``: The marked class/category of each annotation.
-- ``chan``: The signal channel associated with each annotations.
-- ``num``: The labelled annotation number for each annotation.
-- ``aux_note``: The auxiliary information string for each annotation.
-- ``fs``: The sampling frequency of the record, if available.
-- ``label_store``: The integer value used to store/encode each annotation label
-- ``description``: The descriptive string of each annotation label
-- ``custom_labels``: The custom annotation labels defined in the annotation file.
- Maps the relationship between the three label fields.
- The data type is a pandas DataFrame with three columns: ['label_store', 'symbol', 'description']
-- ``contained_labels``: The unique labels contained in this annotation. Same structure
- as custom_labels.
-
-
-Constructor function:
-::
- def __init__(self, recordname, extension, sample, symbol=None, subtype=None,
- chan=None, num=None, aux_note=None, fs=None, label_store=None,
- description=None, custom_labels=None, contained_labels=None)
-
-Call `showanncodes()` to see the list of standard annotation codes. Any text used to label annotations that are not one of these codes should go in the 'aux_note' field rather than the 'symbol' field.
-
-Example usage:
-::
-
- import wfdb
- ann1 = wfdb.Annotation(recordname='ann1', annotator='atr', sample=[10,20,400],
- symbol = ['N','N','['], aux_note=[None, None, 'Serious Vfib'])
-
-Reading Signals
-~~~~~~~~~~~~~~~
-
-
-**rdsamp** - Read a WFDB record and return the signal and record descriptors as attributes in a Record or MultiRecord object.
-
-::
-
- record = rdsamp(recordname, sampfrom=0, sampto=None, channels=None,
- physical=True, pbdir = None, m2s=True)
-
-Example Usage:
-
-::
-
- import wfdb
- ecgrecord = wfdb.rdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
-
-Input Arguments:
-
-- ``recordname`` (required): The name of the WFDB record to be read (without any file extensions).
-- ``sampfrom`` (default=0): The starting sample number to read for each channel.
-- ``sampto`` (default=length of entire signal)- The final sample number to read for each channel.
-- ``channels`` (default=all channels): Indices specifying the channels to be returned.
-- ``physical`` (default=True): Flag that specifies whether to return signals in physical (True) or digital (False) units.
-- ``pbdir`` (default=None): Option used to stream data from Physiobank. The Physiobank database directory from which to find the required record files. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
-- ``m2s`` (default=True): Flag used only for multi-segment records. Specifies whether to convert the returned wfdb.MultiRecord object into a wfdb.Record object (True) or not (False).
-- ``smoothframes`` (default=True): Flag used when reading records with signals having multiple samples per frame. Specifies whether to smooth the samples in signals with more than one sample per frame and return an mxn uniform numpy array as the d_signals or p_signals field (True), or to return a list of 1d numpy arrays containing every expanded sample as the e_d_sign.als or e_p_signals field (False).
-- ``ignoreskew`` (default=False): Flag used when reading records with at least one skewed signal. Specifies whether to apply the skew to align the signals in the output variable (False), or to ignore the skew field and load in all values contained in the dat files unaligned (True).
-- ``returnres`` (default=64): The numpy array dtype of the returned signals. Options are: 64, 32, 16, and 8, where the value represents the numpy int or float dtype. Note that the value cannot be 8 when physical is True since there is no float8 format.
-
-Output Arguments:
-
-- ``record`` - The wfdb Record or MultiRecord object representing the contents of the record read.
-
-**srdsamp** - A simplified wrapper function around rdsamp. Read a WFDB record and return the physical signal and a few important descriptor fields.
-
-::
-
- signals, fields = srdsamp(recordname, sampfrom=0, sampto=None, channels=None, pbdir=None)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, fields = wfdb.srdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
-
-Input arguments:
-
-- ``recordname`` (required): The name of the WFDB record to be read (without any file extensions). If the argument contains any path delimiter characters, the argument will be interpreted as PATH/baserecord and the data files will be searched for in the local path.
-- ``sampfrom`` (default=0): The starting sample number to read for each channel.
-- ``sampto`` (default=None): The sample number at which to stop reading for each channel.
-- ``channels`` (default=all): Indices specifying the channel to be returned.
-
-Output arguments:
-
-- ``signals``: A 2d numpy array storing the physical signals from the record.
-- ``fields``: A dictionary specifying several key attributes of the read record:
-- ``fs``: The sampling frequency of the record
-- ``units``: The units for each channel
-- ``signame``: The signal name for each channel
-- ``comments``: Any comments written in the header
-
-
-Converting between Analog and Digital Values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When reading signal sample values into ``record`` objects using ``rdsamp``, the samples are stored in either the ``p_signals`` or the ``d_signals`` field depending on the specified return type (``physical`` = ``True`` or ``False`` respectively).
-
-One can also use existing objects to obtain physical values from digital values and vice versa, without having to re-read the wfdb file with a different set of options. The two following instance methods perform the conversions.
-
-
-**adc** - Performs analogue to digital conversion of the physical signal stored in p_signals if expanded is False, or e_p_signals if expanded is True. The p_signals/e_p_signals, fmt, gain, and baseline fields must all be valid. If inplace is True, the adc will be performed inplace on the variable, the d_signals/e_d_signals attribute will be set, and the p_signals/e_p_signals field will be set to None.
-
-::
-
- record.adc(self, expanded=False, inplace=False)
-
-Input arguments:
-
-- ``expanded`` (default=False): Boolean specifying whether to transform the e_p_signals attribute (True) or the p_signals attribute (False).
-- ``inplace`` (default=False): Boolean specifying whether to automatically set the object's corresponding digital signal attribute and set the physical signal attribute to None (True), or to return the converted signal as a separate variable without changing the original physical signal attribute (False).
-
-Possible output argument:
-
-- ``d_signals``: The digital conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays.
-
-Example Usage:
-
-::
-
- import wfdb
- record = wfdb.rdsamp('sampledata/100')
- d_signal = record.adc()
- record.adc(inplace=True)
- record.dac(inplace=True)
-
-
-**dac** - Performs digital to analogue conversion of the digital signal stored in d_signals if expanded is False, or e_d_signals if expanded is True. The d_signals/e_d_signals, fmt, gain, and baseline fields must all be valid. If inplace is True, the dac will be performed inplace on the variable, the p_signals/e_p_signals attribute will be set, and the d_signals/e_d_signals field will be set to None.
-
-::
-
- record.dac(self, expanded=False, inplace=False)
-
-Input arguments:
-
-- ``expanded`` (default=False): Boolean specifying whether to transform the e_d_signals attribute (True) or the d_signals attribute (False).
-- ``inplace`` (default=False): Boolean specifying whether to automatically set the object's corresponding physical signal attribute and set the digital signal attribute to None (True), or to return the converted signal as a separate variable without changing the original digital signal attribute (False).
-
-Possible output argument:
-
-- ``p_signals``: The physical conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays.
-
-Example Usage:
-
-::
-
- import wfdb
- record = wfdb.rdsamp('sampledata/100', physical=False)
- p_signal = record.dac()
- record.dac(inplace=True)
- record.adc(inplace=True)
-
-
-Writing Signals
-~~~~~~~~~~~~~~~
-
-The Record class has a **wrsamp** instance method for writing wfdb record files. Create a valid Record object and call ``record.wrsamp()``. If you choose this more advanced method, see also the `setdefaults`, `set_d_features`, and `set_p_features` instance methods to help populate attributes. In addition, there is also the following simpler module level **wrsamp** function.
-
-
-**wrsamp** - Write a single segment WFDB record, creating a WFDB header file and any associated dat files.
-
-::
-
- wrsamp(recordname, fs, units, signames, p_signals=None, d_signals=None,
- fmt=None, gain=None, baseline=None, comments=None, basetime=None,
- basedate=None)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, fields = wfdb.srdsamp('a103l', sampfrom = 50000, channels = [0,1],
- pbdir = 'challenge/2015/training')
- wfdb.wrsamp('ecgrecord', fs = 250, units = ['mV', 'mV'],
- signames = ['I', 'II'], p_signals = sig, fmt = ['16', '16'])
-
-Input Arguments:
-
-- ``recordname`` (required): The string name of the WFDB record to be written (without any file extensions).
-- ``fs`` (required): The numerical sampling frequency of the record.
-- ``units`` (required): A list of strings giving the units of each signal channel.
-- ``signames`` (required): A list of strings giving the signal name of each signal channel.
-- ``p_signals`` (default=None): An MxN 2d numpy array, where M is the signal length. Gives the physical signal
- values intended to be written. Either p_signals or d_signals must be set, but not both. If p_signals
- is set, this method will use it to perform analogue-digital conversion, writing the resultant digital
- values to the dat file(s). If fmt is set, gain and baseline must be set or unset together. If fmt is
- unset, gain and baseline must both be unset.
-- ``d_signals`` (default=None): An MxN 2d numpy array, where M is the signal length. Gives the digital signal
- values intended to be directly written to the dat file(s). The dtype must be an integer type. Either
- p_signals or d_signals must be set, but not both. In addition, if d_signals is set, fmt, gain and baseline
- must also all be set.
-- ``fmt`` (default=None): A list of strings giving the WFDB format of each file used to store each channel.
- Accepted formats are: "80","212","16","24", and "32". There are other WFDB formats but this library
- will not write (though it will read) those file types.
-- ``gain`` (default=None): A list of integers specifying the ADC gain.
-- ``baseline`` (default=None): A list of integers specifying the digital baseline.
-- ``comments`` (default-None): A list of string comments to be written to the header file.
-- ``basetime`` (default=None): A string of the record's start time in 24h HH:MM:SS(.ms) format.
-- ``basedate`` (default=None): A string of the record's start date in DD/MM/YYYY format.
-
-
-Reading Annotations
-~~~~~~~~~~~~~~~~~~~
-
-**rdann** - Read a WFDB annotation file ``recordname.annot`` and return an Annotation object.
-
-::
-
- annotation = rdann(recordname, extension, sampfrom=0, sampto=None, shiftsamps=False,
- pbdir=None, return_label_elements=['symbol'], summarize_labels=False)
-
-Example Usage:
-::
-
- import wfdb
- ann = wfdb.rdann('sampledata/100', 'atr', sampto = 300000)
-
-Input Arguments:
-
-- ``recordname`` (required): The record name of the WFDB annotation file. ie. for file `100.atr`, recordname='100'
-- ``extension`` (required): The annotatator extension of the annotation file. ie. for
- file '100.atr', extension='atr'
-- ``sampfrom`` (default=0): The minimum sample number for annotations to be returned.
-- ``sampto`` (default=None): The maximum sample number for annotations to be returned.
-- ``shiftsamps`` (default=False): Boolean flag that specifies whether to return the
- sample indices relative to 'sampfrom' (True), or sample 0 (False). Annotation files
- store exact sample locations.
-- ``pbdir`` (default=None): Option used to stream data from Physiobank. The Physiobank database
- directory from which to find the required annotation file.
- eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
-- ``return_label_elements`` (default=['symbol']): The label elements that are to be returned
- from reading the annotation file. A list with at least one of the following: 'symbol',
- 'label_store', 'description'.
-- ``summarize_labels`` (default=False): Assign a summary table of the set of annotation labels
- contained in the file to the 'contained_labels' attribute of the returned object.
- This table will contain the columns: ['label_store', 'symbol', 'description', 'n_occurences']
-
-Output arguments:
-
-- ``annotation``: The Annotation object. Contains the following attributes:
- - ``recordname``: The base file name (without extension) of the record that the annotation is attached to.
- - ``extension``: The file extension of the file the annotation is stored in.
- - ``sample``: The annotation locations in samples relative to the beginning of the record.
- - ``symbol``: The annotation type according the the standard WFDB codes.
- - ``subtype``: The marked class/category of each annotation.
- - ``chan``: The signal channel associated with each annotations.
- - ``num``: The labelled annotation number for each annotation.
- - ``aux_note``: The auxiliary information string for each annotation.
- - ``fs``: The sampling frequency of the record, if available.
- - ``label_store``: The integer value used to store/encode each annotation label
- - ``description``: The descriptive string of each annotation label
- - ``custom_labels``: The custom annotation labels defined in the annotation file.
- Maps the relationship between the three label fields.
- The data type is a pandas DataFrame with three columns: ['label_store', 'symbol', 'description']
- - ``contained_labels``: The unique labels contained in this annotation. Same structure
- as custom_labels.
-
-\*\ **NOTE**: In annotation files, every annotation contains the ‘sample’ and ‘symbol’ field. All other fields default to 0 or empty if not present.
-
-**show_ann_labels** - Display the annotation symbols and the codes they represent according to the standard WFDB library 10.5.24
-
-::
-
- show_ann_labels()
-
-Writing Annotations
-~~~~~~~~~~~~~~~~~~~
-
-The Annotation class has a **wrann** instance method for writing wfdb annotation files. Create a valid Annotation object and call ``annotation.wrsamp()``. In addition, there is also the following simpler module level **wrann** function.
-
-**wrann** - Write a WFDB annotation file.
-
-::
-
- wrann(recordname, extension, sample, symbol=None, subtype=None, chan=None,
- num=None, aux_note=None, label_store=None, fs=None, custom_labels=None)
-
-Example Usage:
-
-::
-
- import wfdb
- annotation = wfdb.rdann('b001', 'atr', pbdir='cebsdb')
- wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
-
-Input Arguments:
-
-- ``recordname`` (required): The string name of the WFDB record to be written (without any file extensions).
-- ``extension`` (required): The string annotation file extension.
-- ``sample`` (required): The annotation location in samples relative to the beginning of the record. Numpy array.
-- ``symbol`` (default=None): The symbols used to display the annotation labels. List or numpy array. If this field is present, 'label_store' must not be present.
-- ``subtype`` (default=None): The marked class/category of each annotation. Numpy array.
-- ``chan`` (default=None): The signal channel associated with each annotation. Numpy array.
-- ``num`` (default=None): The labelled annotation number of each annotation. Numpy array.
-- ``aux_note`` (default=None): The auxiliary information strings. List or numpy array.
-- ``label_store`` (default=None): The integer values used to store the annotation labels. Numpy array. If this field is present, 'symbol' must not be present.
-- ``fs`` (default=None): The numerical sampling frequency of the record to be written to the file.
-- ``custom_labels`` (default=None): The map of custom defined annotation labels used for this annotation, in addition to the standard WFDB annotation labels. The custom labels are defined by two or three fields:
-
- - The integer values used to store custom annotation labels in the file (optional)
- - Their short display symbols
- - Their long descriptions.
-
- This input argument may come in four formats:
-
- 1. A pandas.DataFrame object with columns: ['label_store', 'symbol', 'description']
- 2. A pandas.DataFrame object with columns: ['symbol', 'description']
- If this option is chosen, label_store values are automatically chosen.
- 3. A list or tuple of tuple triplets, with triplet elements representing: (label_store, symbol, description).
- 4. A list or tuple of tuple pairs, with pair elements representing: (symbol, description).
- If this option is chosen, label_store values are automatically chosen.
-
- If the 'label_store' field is given for this function, and 'custom_labels' is defined, 'custom_labels'
- must contain 'label_store' in its mapping. ie. it must come in format 1 or 3 above.
+The native Python waveform-database (WFDB) package. A library of tools for reading, writing, and processing WFDB signals and annotations.
-\*\ **NOTE**: Each annotation stored in a WFDB annotation file contains a sample and a label field. All other fields may or may not be present. Therefore in order to save space, when writing additional string features such as 'aux_note' that are not present for every annotation, it is recommended to make the field a list, with empty indices set to None so that they are not written to the file.
+Core components of this package are based on the original WFDB specifications. This package does not contain the exact same functionality as the original WFDB package. It aims to implement as many of its core features as possible, with user-friendly APIs. Additional useful physiological signal-processing tools are added over time.
-Plotting Data
-~~~~~~~~~~~~~
+Documentation and Usage
+-----------------------
-**plotrec** - Subplot and label each channel of a WFDB Record. Optionally, subplot annotation locations over selected channels.
+See the documentation site for the public APIs.
-::
+See the `demo.ipynb`_ notebook file for more example use cases.
- plotrec(record=None, title = None, annotation = None, timeunits='samples',
- sigstyle='', annstyle='r*', plotannsym=False, figsize=None,
- returnfig=False, ecggrids=[]):
-
-Example Usage:
-
-::
-
- import wfdb
- record = wfdb.rdsamp('sampledata/100', sampto = 3000)
- annotation = wfdb.rdann('sampledata/100', 'atr', sampto = 3000)
-
- wfdb.plotrec(record, annotation = annotation,
- title='Record 100 from MIT-BIH Arrhythmia Database',
- timeunits = 'seconds', figsize = (10,4), ecggrids = 'all')
-
-Input Arguments:
-
-- ``record`` (required): A wfdb Record object. The p_signals attribute will be plotted.
-- ``title`` (default=None): A string containing the title of the graph.
-- ``annotation`` (default=None): A list of Annotation objects or numpy arrays. The locations of the Annotation objects' 'sample' attribute, or the locations of the numpy arrays' values, will be overlaid on the signals. The list index of the annotation item corresponds to the signal channel that each annotation set will be plotted on. For channels without annotations to plot, put None in the list. This argument may also be just an Annotation object or numpy array, which will be plotted over channel 0.
-- ``timeunits`` (default='samples'): String specifying the x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
-- ``sigstyle`` (default=''): String, or list of strings, specifying the styling of the matplotlib plot for the signals. If 'sigstyle' is a string, each channel will have the same style. If it is a list, each channel's style will correspond to the list element. ie. sigtype=['r','b','k'].
-- ``annstyle`` (default='r*'): String, or list of strings, specifying the styling of the matplotlib plot for the annotations. If 'annstyle' is a string, each channel will have the same style. If it is a list, each channel's style will correspond to the list element.
-- ``plotannsym`` (default=False): Specifies whether to plot the annotation symbols at their locations.
-- ``figsize`` (default=None): Tuple pair specifying the width, and height of the figure. Same as the 'figsize' argument passed into matplotlib.pyplot's figure() function.
-- ``returnfig`` (default=False): Specifies whether the figure is to be returned as an output argument
-- ``ecggrids`` (default=[]): List of integers specifying channels in which to plot ecg grids. May be set to [] for no channels, or 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be plotted with grids must have units equal to 'uV', 'mV', or 'V'.
-
-Output argument:
-- ``figure``: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True.
-
-
-**plotann** - Plot sample locations of an Annotation object.
-
-::
-
- plotann(annotation, title = None, timeunits = 'samples', returnfig = False)
-
-Example Usage:
-
-::
-
- import wfdb
- record = wfdb.rdsamp('sampledata/100', sampto = 15000)
- annotation = wfdb.rdann('sampledata/100', 'atr', sampto = 15000)
-
- wfdb.plotrec(record, annotation = annotation,
- title='Record 100 from MIT-BIH Arrhythmia Database',
- timeunits = 'seconds')
-
-
-Input Arguments:
-
-- ``annotation`` (required): An Annotation object. The sample attribute locations will be overlaid on the signal.
-- ``title`` (default=None): A string containing the title of the graph.
-- ``annotation`` (default=None): An Annotation object. The sample attribute locations will be overlaid on the signal.
-- ``timeunits`` (default='samples'): String specifying the x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
-- ``returnfig`` (default=False): Specifies whether the figure is to be returned as an output argument
-
-Output argument:
-- ``figure``: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True.
-
-Downloading Physiobank Content
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Download files from various Physiobank databases. The Physiobank index page located at http://physionet.org/physiobank/database lists all available databases.
-
-
-**getdblist** - Return a list of all the physiobank databases available.
-
-::
-
- dblist = wfdb.getdblist()
-
-Example Usage:
-
-::
-
- import wfdb
- dblist = wfdb.getdblist()
-
-**dldatabase** - Download WFDB record (and optionally annotation) files from a Physiobank database. The database must contain a 'RECORDS' file in its base directory which lists its WFDB records.
-
-::
-
- dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' ,
- keepsubdirs = True, overwrite = False)
-
-Example Usage:
-
-::
-
- import wfdb
- wfdb.dldatabase('ahadb', os.getcwd())
-
-Input arguments:
-
-- ``pbdb`` (required): The Physiobank database directory to download. eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
-- ``dlbasedir`` (required): The full local directory path in which to download the files.
-- ``records`` (default='all'): Specifier of the WFDB records to download. Is either a list of strings which each specify a record, or 'all' to download all records listed in the database's RECORDS file. eg. records = ['test01_00s', test02_45s] for database https://physionet.org/physiobank/database/macecgdb/
-- ``annotators`` (default='all'): Specifier of the WFDB annotation file types to download along with the record files. Is either None to skip downloading any annotations, 'all' to download all annotation types as specified by the ANNOTATORS file, or a list of strings which each specify an annotation extension. eg. annotators = ['anI'] for database https://physionet.org/physiobank/database/prcp/
-- ``keepsubdirs`` (default=True): Whether to keep the relative subdirectories of downloaded files as they are organized in Physiobank (True), or to download all files into the same base directory (False).
-- ``overwrite`` (default=False): If set to True, all files will be redownloaded regardless. If set to False, existing files with the same name and relative subdirectory will be checked. If the local file is the same size as the online file, the download is skipped. If the local file is larger, it will be deleted and the file will be redownloaded. If the local file is smaller, the file will be assumed to be partially downloaded and the remaining bytes will be downloaded and appended.
-
-
-**dldatabasefiles** - Download specified files from a Physiobank database.
-
-::
-
- dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False)
-
-Example Usage:
-
-::
-
- import wfdb
- wfdb.dldatabasefiles('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
-
-Input arguments:
-
-- ``pbdb`` (required): The Physiobank database directory to download. eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
-- ``dlbasedir`` (required): The full local directory path in which to download the files.
-- ``files`` (required): A list of strings specifying the file names to download relative to the database base directory
-- ``keepsubdirs`` (default=True): Whether to keep the relative subdirectories of downloaded files as they are organized in Physiobank (True), or to download all files into the same base directory (False).
-- ``overwrite`` (default=False): If set to True, all files will be redownloaded regardless. If set to False, existing files with the same name and relative subdirectory will be checked. If the local file is the same size as the online file, the download is skipped. If the local file is larger, it will be deleted and the file will be redownloaded. If the local file is smaller, the file will be assumed to be partially downloaded and the remaining bytes will be downloaded and appended.
-
-
-Signal Processing
------------------
-
-Basic Functionalities
-~~~~~~~~~~~~~~~~~~~~~
-
-**resample_sig** - Resample a single-channel signal
-
-::
-
- resample_sig(x, fs, fs_target)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, fields = wfdb.srdsamp('sampledata/100', sampto=10000)
- x, _ = wfdb.processing.resample_sig(x=sig[:,0], fs=fields['fs'], fs_target=128)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``fs`` (required): The signal frequency.
-- ``fs_target`` (required): The target signal frequency.
-
-
-**resample_singlechan** - Resample a single-channel signal and its annotation.
-
-::
-
- resample_singlechan(x, ann, fs, fs_target)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, fields = wfdb.srdsamp('sampledata/100')
- ann = wfdb.rdann('sampledata/100', 'atr')
- new_sig, new_ann = wfdb.processing.resample_singlechan(x=sig[:, 0], ann=ann, fs=fields['fs'], fs_target=50)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``ann`` (required): The signal Annotation.
-- ``fs`` (required): The signal frequency.
-- ``fs_target`` (required): The target signal frequency.
-
-
-
-**resample_multichan** - Resample a multi-channel signal and its annotation.
-
-::
-
- resample_multichan(sig, ann, fs, fs_target)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, fields = wfdb.srdsamp('sampledata/100')
- ann = wfdb.rdann('sampledata/100', 'atr')
- new_sig, new_ann = wfdb.processing.resample_multichan(sig=sig, ann=ann, fs=fields['fs'], fs_target=50)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``ann`` (required): The signal Annotation.
-- ``fs`` (required): The signal frequency.
-- ``fs_target`` (required): The target signal frequency.
-
-
-
-**normalize** - Resizes a signal between a lower and upper bound
-
-::
-
- normalize(x, lb=0, ub=1)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, _ = wfdb.srdsamp('sampledata/100')
- x = wfdb.processing.normalize(x=sig[:, 0], lb=-2, ub=15)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``lb`` (required): The lower bound.
-- ``ub`` (required): The upper bound.
-
-
-
-**smooth** - Signal smoothing
-
-::
-
- smooth(x, window_size)
-
-Example Usage:
-
-::
-
- import wfdb
- sig, _ = wfdb.srdsamp('sampledata/100')
- x = smooth(x=sig[:,0], window_size=150)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``window_size`` (required): The smoothing window width.
-
-
-Peak Detection
-~~~~~~~~~~~~~~
-
-**gqrs_detect** - The GQRS detector function
-
-::
-
- gqrs_detect(x, fs, adcgain, adczero, threshold=1.0, hr=75, RRdelta=0.2,
- RRmin=0.28, RRmax=2.4, QS=0.07, QT=0.35, RTmin=0.25, RTmax=0.33,
- QRSa=750, QRSamin=130):
-
-Example Usage:
-
-::
-
- import wfdb
- t0 = 10000
- tf = 20000
- record = wfdb.rdsamp("sampledata/100", sampfrom=t0, sampto=tf, channels=[0])
- d_signal = record.adc()[:,0]
- peak_indices = wfdb.processing.gqrs_detect(x=d_signal, fs=record.fs, adcgain=record.adcgain[0], adczero=record.adczero[0], threshold=1.0)
-
-Input arguments:
-
-- ``x`` (required): The digital signal as a numpy array
-- ``fs`` (required): The sampling frequency of the signal
-- ``adcgain``: The gain of the signal (the number of adus (q.v.) per physical unit)
-- ``adczero`` (required): The value produced by the ADC given a 0 volt input.
-- ``threshold`` (default=1.0): The threshold for detection
-- ``hr`` (default=75): Typical heart rate, in beats per minute
-- ``RRdelta`` (default=0.2): Typical difference between successive RR intervals in seconds
-- ``RRmin`` (default=0.28): Minimum RR interval ("refractory period"), in seconds
-- ``RRmax`` (default=2.4): Maximum RR interval, in seconds; thresholds will be adjusted if no peaks are detected within this interval
-- ``QS`` (default=0.07): Typical QRS duration, in seconds
-- ``QT`` (default=0.35): Typical QT interval, in seconds
-- ``RTmin`` (default=0.25): Minimum interval between R and T peaks, in seconds
-- ``RTmax`` (default=0.33): Maximum interval between R and T peaks, in seconds
-- ``QRSa`` (default=750): Typical QRS peak-to-peak amplitude, in microvolts
-- ``QRSamin`` (default=130): Minimum QRS peak-to-peak amplitude, in microvolts
-
-Output Arguments:
-
-- ``peak_indices``: A python list containing the peak indices.
-
-
-**correct_peaks** - A post-processing algorithm to correct peaks position.
-
-See code comments for details about the algorithm.
-
-
-::
-
- correct_peaks(x, peak_indices, min_gap, max_gap, smooth_window)
-
-Input arguments:
-
-- ``x`` (required): The signal.
-- ``peak_indices`` (required): The location of the peaks.
-- ``min_gap`` (required): The minimum gap in samples between two peaks.
-- ``max_gap`` (required): The maximum gap in samples between two peaks.
-- ``smooth_window`` (required): The size of the smoothing window.
-
-Output Arguments:
-
-- ``new_indices``: A python list containing the new peaks indices.
-
-
-Example Usage:
-
-::
-
- import wfdb
- t0 = 10000
- tf = 20000
- record = wfdb.rdsamp('sampledata/100', sampfrom=t0, sampto=tf, channels=[0])
- d_signal = record.adc()[:,0]
- peak_indices = wfdb.processing.gqrs_detect(d_signal, fs=record.fs,
- adcgain=record.adcgain[0],
- adczero=record.adczero[0],
- threshold=1.0)
- min_bpm = 10
- max_bpm = 350
- min_gap = record.fs*60/min_bpm
- max_gap = record.fs*60/max_bpm
- new_indices = wfdb.processing.correct_peaks(d_signal, peak_indices=peak_indices,
- min_gap=min_gap, max_gap=max_gap,
- smooth_window=150)
+Installation
+------------
-Heart Rate
-~~~~~~~~~~~~~~
+The distribution is hosted on pypi at: https://pypi.python.org/pypi/wfdb/. To directly install the package from pypi without needing to explicitly download content, run from your terminal::
-**compute_hr** - Compute instantaneous heart rate from peak indices and signal frequency.
+ $ pip install wfdb
-::
+The development version is hosted at: https://github.com/MIT-LCP/wfdb-python. This repository also contains demo scripts and example data. To install the development version, clone or download the repository, navigate to the base directory, and run::
- compute_hr(siglen, peak_indices, fs)
+ $ pip install .
-Input arguments:
-- ``siglen`` (required): The length of the corresponding signal.
-- ``peak_indices`` (required): The peak indices.
-- ``fs`` (required): The corresponding signal's sampling frequency.
+Development
+-----------
+The development repository is hosted at: https://github.com/MIT-LCP/wfdb-python
-Output Arguments:
+The package is to be expanded with physiological signal-processing tools, and general improvements. Development is made for Python 2.7 and 3.5+ only.
-- ``hr``: A numpy array of the instantaneous heart rate, with the length of the corresponding signal. Contains numpy.nan where heart rate could not be computed.
+Contributing
+------------
-Example Usage:
+We welcome community contributions in the form of pull requests. When contributing code, please ensure:
-::
+* PEP8_ style guidelines are followed.
+* Documentation is provided. New functions and classes should have numpy/scipy style docstrings_.
+* Unit tests are written for new features that are not covered by `existing tests`_.
- import wfdb
- t0 = 10000
- tf = 20000
- record = wfdb.rdsamp("sampledata/100", sampfrom=t0, sampto=tf, channels=[0])
- peak_indices = wfdb.processing.gqrs_detect(record.adc(), fs=record.fs,
- adcgain=record.adcgain[0],
- adczero=record.adczero[0],
- threshold=1.0)
- hr = wfdb.processing.compute_hr(siglen=tf-t0, peak_indices=peak_indices, fs=record.fs)
+Authors
+-------
+`Chen Xie`_
+`Julien Dubiel`_
-Based on the original WFDB software package specifications
-----------------------------------------------------------
-| `WFDB Software Package`_
-| `WFDB Applications Guide`_
-| `WFDB Header File Specifications`_
+.. |Build Status| image:: https://travis-ci.org/MIT-LCP/wfdb-python.svg?branch=master
+ :target: https://travis-ci.org/MIT-LCP/wfdb-python
-.. _WFDB Software Package: http://physionet.org/physiotools/wfdb.shtml
-.. _WFDB Applications Guide: http://physionet.org/physiotools/wag/
-.. _WFDB Header File Specifications: https://physionet.org/physiotools/wag/header-5.htm
+.. _PEP8: https://www.python.org/dev/peps/pep-0008/
+.. _docstrings: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+.. _existing tests: https://github.com/MIT-LCP/wfdb-python/tree/master/tests
+.. _demo.ipynb: https://github.com/MIT-LCP/wfdb-python/blob/master/demo.ipynb
-.. |Build Status| image:: https://travis-ci.org/MIT-LCP/wfdb-python.svg?branch=master
- :target: https://travis-ci.org/MIT-LCP/wfdb-python
+.. _Chen Xie: https://github.com/cx1111/
+.. _Julien Dubiel: https://github.com/Dubrzr/
diff --git a/demoimg.png b/demo-img.png
similarity index 100%
rename from demoimg.png
rename to demo-img.png
diff --git a/demo.ipynb b/demo.ipynb
index a5a34776..eab2f59e 100644
--- a/demo.ipynb
+++ b/demo.ipynb
@@ -6,22 +6,15 @@
"source": [
"# Demo Scripts for the wfdb-python package\n",
"\n",
- "Run this script from the base directory of the git repository to access the included demo files"
+ "Run this notebook from the base directory of the git repository to access the included demo files"
]
},
{
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
+ "cell_type": "markdown",
+ "metadata": {},
"source": [
- "import wfdb\n",
- "import numpy as np\n",
- "import os\n",
- "import matplotlib.pyplot as plt\n",
- "from IPython.display import display"
+ "## Documentation Site\n",
+ "\n"
]
},
{
@@ -32,11 +25,14 @@
},
"outputs": [],
"source": [
- "# See the help documentation for the read functions\n",
+ "from IPython.display import display\n",
+ "import matplotlib.pyplot as plt\n",
+ "%matplotlib inline\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import shutil\n",
"\n",
- "#help(wfdb.rdsamp)\n",
- "#help(wfdb.srdsamp)\n",
- "#help(wfdb.rdann)"
+ "import wfdb"
]
},
{
@@ -49,87 +45,79 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
- "# Demo 1 - Read a wfdb record using the 'rdsamp' function into a wfdb.Record object.\n",
+ "# Demo 1 - Read a wfdb record using the 'rdrecord' function into a wfdb.Record object.\n",
"# Plot the signals, and show the data.\n",
- "record = wfdb.rdsamp('sampledata/a103l') \n",
- "wfdb.plotrec(record, title='Record a103l from Physionet Challenge 2015') \n",
+ "record = wfdb.rdrecord('sample-data/a103l') \n",
+ "wfdb.plot_wfdb(record=record, title='Record a103l from Physionet Challenge 2015') \n",
"display(record.__dict__)\n",
"\n",
"\n",
"# Can also read the same files hosted on Physiobank https://physionet.org/physiobank/database/\n",
"# in the challenge/2015/training/ database subdirectory. Full url = https://physionet.org/physiobank/database/challenge/2015/training/\n",
- "record2 = wfdb.rdsamp('a103l', pbdir = 'challenge/2015/training/')"
+ "record2 = wfdb.rdrecord('a103l', pb_dir = 'challenge/2015/training/')"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
- "# Demo 2 - Read certain channels and sections of the WFDB record using the simplified 'srdsamp' function\n",
+ "# Demo 2 - Read certain channels and sections of the WFDB record using the simplified 'rdsamp' function\n",
"# which returns a numpy array and a dictionary. Show the data.\n",
- "signals, fields=wfdb.srdsamp('sampledata/s0010_re', channels=[14, 0, 5, 10], sampfrom=100, sampto=15000)\n",
+ "signals, fields = wfdb.rdsamp('sample-data/s0010_re', channels=[14, 0, 5, 10], sampfrom=100, sampto=15000)\n",
"display(signals)\n",
"display(fields)\n",
"\n",
"# Can also read the same files hosted on Physiobank \n",
- "signals2, fields2=wfdb.srdsamp('s0010_re', channels=[14, 0, 5, 10], sampfrom=100, sampto=15000, pbdir = 'ptbdb/patient001/')"
+ "signals2, fields2 = wfdb.rdsamp('s0010_re', channels=[14, 0, 5, 10], sampfrom=100, sampto=15000, pb_dir='ptbdb/patient001/')"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 3 - Read a WFDB header file only (without the signals)\n",
- "record = wfdb.rdheader('sampledata/drive02')\n",
+ "record = wfdb.rdheader('sample-data/drive02')\n",
"display(record.__dict__)\n",
"\n",
"# Can also read the same file hosted on Physiobank\n",
- "record2 = wfdb.rdheader('drive02', pbdir = 'drivedb')"
+ "record2 = wfdb.rdheader('drive02', pb_dir='drivedb')"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 4 - Read part of a WFDB annotation file into a wfdb.Annotation object, and plot the samples\n",
- "annotation = wfdb.rdann('sampledata/100', 'atr', sampfrom = 100000, sampto = 110000)\n",
+ "annotation = wfdb.rdann('sample-data/100', 'atr', sampfrom=100000, sampto=110000)\n",
"annotation.fs = 360\n",
- "wfdb.plotann(annotation, timeunits = 'minutes')\n",
+ "wfdb.plot_wfdb(annotation=annotation, time_units='minutes')\n",
"\n",
"# Can also read the same file hosted on PhysioBank \n",
- "annotation2 = wfdb.rdann('100', 'atr', sampfrom = 100000, sampto = 110000, pbdir = 'mitdb')"
+ "annotation2 = wfdb.rdann('100', 'atr', sampfrom=100000, sampto=110000, pb_dir='mitdb')"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 5 - Read a WFDB record and annotation. Plot all channels, and the annotation on top of channel 0.\n",
- "record = wfdb.rdsamp('sampledata/100', sampto = 15000)\n",
- "annotation = wfdb.rdann('sampledata/100', 'atr', sampto = 15000)\n",
+ "record = wfdb.rdrecord('sample-data/100', sampto = 15000)\n",
+ "annotation = wfdb.rdann('sample-data/100', 'atr', sampto = 15000)\n",
"\n",
- "wfdb.plotrec(record, annotation = annotation, title='Record 100 from MIT-BIH Arrhythmia Database', timeunits = 'seconds')"
+ "wfdb.plot_wfdb(record=record, annotation=annotation,\n",
+ " title='Record 100 from MIT-BIH Arrhythmia Database',\n",
+ " time_units='seconds')"
]
},
{
@@ -147,16 +135,12 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 6 - Read the multi-segment record and plot waveforms from the MIMIC matched waveform database. \n",
- "import wfdb\n",
- "from IPython.display import display\n",
- "record=wfdb.rdsamp('sampledata/multisegment/s25047/s25047-2704-05-04-10-44')\n",
- "wfdb.plotrec(record, title='Record s25047-2704-05-04-10-44') \n",
+ "record = wfdb.rdrecord('sample-data/multi-segment/s25047/s25047-2704-05-04-10-44')\n",
+ "wfdb.plot_wfdb(record=record, title='Record s25047-2704-05-04-10-44') \n",
"display(record.__dict__)\n",
"\n",
"# Can also read the same files hosted on PhysioBank (takes long to stream the many large files)\n",
@@ -166,19 +150,19 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 7 - Read the multi-segment record and plot waveforms from the MIMIC matched waveform database.\n",
"# Notice that some channels have no valid values to plot\n",
- "record = wfdb.rdsamp('sampledata/multisegment/s00001/s00001-2896-10-10-00-31', sampfrom = 3000000, sampto = 4000000)\n",
- "wfdb.plotrec(record, title='Record s00001/s00001-2896-10-10-00-31') \n",
+ "record = wfdb.rdrecord('sample-data/multi-segment/s00001/s00001-2896-10-10-00-31',\n",
+ " sampfrom=3000000, sampto=4000000)\n",
+ "wfdb.plot_wfdb(record, title='Record s00001/s00001-2896-10-10-00-31') \n",
"display(record.__dict__)\n",
"\n",
"# Can also read the same files hosted on PhysioBank\n",
- "record2 = wfdb.rdsamp('s00001-2896-10-10-00-31', sampfrom = 3000000, sampto = 4000000, pbdir = 'mimic2wdb/matched/s00001')"
+ "record2 = wfdb.rdrecord('s00001-2896-10-10-00-31', sampfrom=3000000, sampto=4000000,\n",
+ " pb_dir = 'mimic2wdb/matched/s00001')"
]
},
{
@@ -187,46 +171,42 @@
"source": [
"### Multiple sample/frame examples\n",
"\n",
- "Although there can only be one base sampling frequency per record, a single wfdb record can store multiple channels with different sampling frequencies, as long as their sampling frequencies can all be expressed by an integer multiple of a base value. This is done by using the `sampsperframe` attribute in each channel, which indicates the number of samples of each channel present in each frame.\n",
+ "Although there can only be one base sampling frequency per record, a single wfdb record can store multiple channels with different sampling frequencies, as long as their sampling frequencies can all be expressed by an integer multiple of a base value. This is done by using the `samps_per_frame` attribute in each channel, which indicates the number of samples of each channel present in each frame.\n",
"\n",
- "ie: To capture three signals with `fs = 120, 240, and 360 Hz` in a single record, they can be combined into a record with `fs = 120` and `sampsperframe = [1, 2, 3]`.\n",
+ "ie: To capture three signals with sampling frequencies of 120, 240, and 360 Hz in a single record, they can be combined into a record with `fs=120` and `samps_per_frame = [1, 2, 3]`.\n",
"\n",
"#### Reading Options\n",
"\n",
"This package allows signals in records with multiple samples/frame to be read in two ways:\n",
- "1. smoothed - An uniform mxn numpy is returned as the d_signals or p_signals field. Channels with multiple samples/frame have their values averaged within each frame. This is like the behaviour of the `rdsamp` function of the original WFDB c package. Note that `wfdb.plotrec` only works if the record object has the `p_signals` field.\n",
- "2. expanded - A list of 1d numpy arrays is returned as the e_d_signals or e_p_signals field. All samples for each channel are returned in its respective numpy array. The arrays may have different lengths depending on their `sampsperframe` values. \n",
+ "1. smoothed - A uniform mxn numpy array is returned as the d_signal or p_signal field. Channels with multiple samples/frame have their values averaged within each frame. This is like the behaviour of the `rdsamp` function of the original WFDB c package. Note that `wfdb.plot_wfdb` only works if the record object has the `p_signal` field.\n",
+ "2. expanded - A list of 1d numpy arrays is returned as the e_d_signal or e_p_signal field. All samples for each channel are returned in its respective numpy array. The arrays may have different lengths depending on their `samps_per_frame` values.\n",
"\n",
- "Set the `smoothframes` *(default=True)* option in `rdsamp` to return the desired signal type."
+ "Set the `smooth_frames` *(default=True)* option in `rdrecord` to return the desired signal type."
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 8 - Read a wfdb record in which one channel has multiple samples/frame. Return a smoothed uniform array.\n",
- "record = wfdb.rdsamp('sampledata/test01_00s_frame')\n",
- "wfdb.plotrec(record)"
+ "record = wfdb.rdrecord('sample-data/test01_00s_frame')\n",
+ "wfdb.plot_wfdb(record)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 9 - Read a wfdb record in which one channel has multiple samples/frame. Return a list of all the expanded samples.\n",
- "record = wfdb.rdsamp('sampledata/test01_00s_frame', smoothframes = False)\n",
+ "record = wfdb.rdrecord('sample-data/test01_00s_frame', smooth_frames=False)\n",
"\n",
- "display(record.e_p_signals)\n",
+ "display(record.e_p_signal)\n",
"# Show that different channels have different lengths. Channel 1 has 2 samples/frame, hence has 2x as many samples.\n",
- "print([len(s) for s in record.e_p_signals])\n",
+ "print([len(s) for s in record.e_p_signal])\n",
"\n",
"# wfdb.plotrec doesn't work because the Record object is missing its p_signals field."
]
@@ -250,14 +230,18 @@
"# of the Record object.\n",
"\n",
"# Read a record as a Record object.\n",
- "record = wfdb.rdsamp('sampledata/100', physical = False)\n",
- "record.recordname = '100x'\n",
+ "record = wfdb.rdrecord('sample-data/100', physical=False)\n",
+ "record.record_name = '100x'\n",
"\n",
"# Call the instance method of the object\n",
"record.wrsamp()\n",
"\n",
"# The new file can be read\n",
- "recordx = wfdb.rdsamp('100x')"
+ "record_x = wfdb.rdrecord('100x')\n",
+ "\n",
+ "# Cleanup: delete the created files\n",
+ "# os.remove('100x.hea')\n",
+ "# os.remove('100.dat')"
]
},
{
@@ -272,13 +256,17 @@
"# This is the basic way to write physical signals to a WFDB file. \n",
"\n",
"# Read part of a record from Physiobank\n",
- "sig, fields = wfdb.srdsamp('a103l', sampfrom = 50000, channels = [0,1], pbdir = 'challenge/2015/training')\n",
+ "sig, fields = wfdb.rdsamp('a103l', sampfrom=50000, channels=[0,1], pb_dir='challenge/2015/training')\n",
"\n",
"# Call the gateway wrsamp function, manually inserting fields as function input parameters\n",
- "wfdb.wrsamp('ecgrecord', fs = 250, units = ['mV', 'mV'], signames = ['I', 'II'], p_signals = sig, fmt = ['16', '16'])\n",
+ "wfdb.wrsamp('ecg-record', fs=250, units=['mV', 'mV'], sig_name=['I', 'II'], p_signal=sig, fmt=['16', '16'])\n",
"\n",
"# The new file can be read\n",
- "recordecg = wfdb.rdsamp('ecgrecord')"
+ "record = wfdb.rdrecord('ecg-record')\n",
+ "\n",
+ "# Cleanup: delete the created files\n",
+ "# os.remove('ecg-record.hea')\n",
+ "# os.remove('ecg-record.dat')"
]
},
{
@@ -292,14 +280,18 @@
"# Demo 12 - Write a WFDB record with multiple samples/frame in a channel\n",
"\n",
"# Read a record as a Record object.\n",
- "record = wfdb.rdsamp('sampledata/test01_00s_frame', physical = False, smoothframes=False)\n",
- "record.recordname = 'test01_00s_framex'\n",
+ "record = wfdb.rdrecord('sample-data/test01_00s_frame', physical=False, smooth_frames=False)\n",
+ "record.record_name = 'test01_00s_framex'\n",
"\n",
- "# Call the instance method of the object with expanded=True to write the record using the e_d_signals field\n",
+ "# Call the instance method of the object with expanded=True to write the record using the e_d_signal field\n",
"record.wrsamp(expanded=True)\n",
"\n",
"# The new file can be read\n",
- "recordx = wfdb.rdsamp('test01_00s_framex')"
+ "recordx = wfdb.rdrecord('test01_00s_framex')\n",
+ "\n",
+ "# Cleanup: delete the created files\n",
+ "# os.remove('test01_00s_framex.hea')\n",
+ "# os.remove('test01_00s.dat')"
]
},
{
@@ -314,14 +306,17 @@
"# of the Annotation object\n",
"\n",
"# Read an annotation from Physiobank\n",
- "annotation = wfdb.rdann('sampledata/100', 'atr')\n",
- "annotation.annotator = 'cpy'\n",
+ "annotation = wfdb.rdann('sample-data/100', 'atr')\n",
+ "annotation.extension = 'cpy'\n",
"\n",
"# Call the instance method of the object\n",
"annotation.wrann()\n",
"\n",
"# The new file can be read\n",
- "ann100copy = wfdb.rdann('100', 'cpy')"
+ "annotation_copy = wfdb.rdann('100', 'cpy')\n",
+ "\n",
+ "# Cleanup: delete the created files\n",
+ "# os.remove('100.cpy')"
]
},
{
@@ -332,28 +327,30 @@
},
"outputs": [],
"source": [
- "# Demo 14 - Write a WFDB annotation file without using an Annotator object via the gateway wrann function.\n",
+ "# Demo 14 - Write a WFDB annotation file without using an Annotator\n",
+ "# object via the gateway wrann function.\n",
"\n",
"# Read an annotation as an Annotation object\n",
- "annotation = wfdb.rdann('b001', 'atr', pbdir='cebsdb')\n",
+ "annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')\n",
"\n",
"# Call the gateway wrann function, manually inserting fields as function input parameters\n",
- "wfdb.wrann('b001', 'cpy', annotation.sample, annotation.anntype)\n",
+ "wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)\n",
"\n",
"# The new file can be read\n",
- "annbcopy = wfdb.rdann('b001', 'cpy')"
+ "annotation_copy = wfdb.rdann('b001', 'cpy')\n",
+ "\n",
+ "# Cleanup: delete the created files\n",
+ "# os.remove('b001.cpy')"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
- "# Demo 15 - View what the 'anntype' symbols mean in the standard WFDB library\n",
- "wfdb.showanncodes()"
+ "# Demo 15 - View the standard WFDB annotation labels\n",
+ "wfdb.show_ann_labels()"
]
},
{
@@ -370,67 +367,64 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 16 - List the Physiobank Databases\n",
"\n",
- "dbs = wfdb.getdblist()\n",
+ "dbs = wfdb.get_dbs()\n",
"display(dbs)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 17 - Download all the WFDB records and annotations from a small Physiobank Database\n",
"\n",
"# Make a temporary download directory in your current working directory\n",
"cwd = os.getcwd()\n",
- "dldir = os.path.join(cwd, 'tmp_dl_dir')\n",
+ "dl_dir = os.path.join(cwd, 'tmp_dl_dir')\n",
"# Make sure to use a new directory\n",
- "while os.path.exists(dldir):\n",
- " dldir = dldir+'1'\n",
+ "while os.path.exists(dl_dir):\n",
+ " dl_dir = dl_dir+'1'\n",
"\n",
"# Download all the WFDB content\n",
- "wfdb.dldatabase('ahadb', dlbasedir = dldir)\n",
+ "wfdb.dl_database('ahadb', dl_dir=dl_dir)\n",
"\n",
"# Display the downloaded content in the folder\n",
- "display(os.listdir(dldir))"
+ "display(os.listdir(dl_dir))\n",
+ "\n",
+ "# Cleanup: delete the downloaded directory\n",
+ "# shutil.rmtree(dl_dir)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {
- "collapsed": true
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Demo 18 - Download specified files from a Physiobank database\n",
"\n",
"# The files to download\n",
- "filelist = ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat', 'data/001b.hea', 'data/001b.dat']\n",
+ "file_list = ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat', 'data/001b.hea', 'data/001b.dat']\n",
"\n",
"# Make a temporary download directory in your current working directory\n",
"cwd = os.getcwd()\n",
- "dldir = os.path.join(cwd, 'tmp_dl_dir')\n",
- "# Make sure to use a new directory\n",
- "while os.path.exists(dldir):\n",
- " dldir = dldir+'1'\n",
+ "dl_dir = os.path.join(cwd, 'tmp_dl_dir')\n",
"\n",
"# Download the listed files\n",
- "wfdb.dldatabasefiles('staffiii', dldir, filelist)\n",
+ "wfdb.dl_files('staffiii', dl_dir, file_list)\n",
"\n",
"# Display the downloaded content in the folder\n",
- "display(os.listdir(dldir))\n",
- "display(os.listdir(os.path.join(dldir, 'data')))"
+ "display(os.listdir(dl_dir))\n",
+ "display(os.listdir(os.path.join(dl_dir, 'data')))\n",
+ "\n",
+ "# Cleanup: delete the downloaded directory\n",
+ "# shutil.rmtree(dl_dir)"
]
},
{
@@ -439,7 +433,7 @@
"collapsed": true
},
"source": [
- "## ECG Peak Detection"
+ "## ECG Processing"
]
},
{
@@ -450,18 +444,30 @@
},
"outputs": [],
"source": [
- "def peaks_hr(x, peak_indices, fs, title, figsize=(20, 10), saveto=None):\n",
- " \n",
+ "import wfdb\n",
+ "from wfdb import processing"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Demo 19 - Use the gqrs detection algorithm and correct the peaks\n",
+ "\n",
+ "def peaks_hr(sig, peak_inds, fs, title, figsize=(20, 10), saveto=None):\n",
+ " \"Plot a signal with its peaks and heart rate\"\n",
" # Calculate heart rate\n",
- " hrs = wfdb.processing.compute_hr(siglen=x.shape[0], peak_indices=peak_indices, fs=fs)\n",
+ " hrs = processing.compute_hr(sig_len=sig.shape[0], qrs_inds=peak_inds, fs=fs)\n",
" \n",
- " N = x.shape[0]\n",
+ " N = sig.shape[0]\n",
" \n",
" fig, ax_left = plt.subplots(figsize=figsize)\n",
" ax_right = ax_left.twinx()\n",
" \n",
- " ax_left.plot(x, color='#3979f0', label='Signal')\n",
- " ax_left.plot(peak_indices, x[peak_indices], 'rx', marker='x', color='#8b0000', label='Peak', markersize=12)\n",
+ " ax_left.plot(sig, color='#3979f0', label='Signal')\n",
+ " ax_left.plot(peak_inds, sig[peak_inds], 'rx', marker='x', color='#8b0000', label='Peak', markersize=12)\n",
" ax_right.plot(np.arange(N), hrs, label='Heart rate', color='m', linewidth=2)\n",
"\n",
" ax_left.set_title(title)\n",
@@ -476,30 +482,60 @@
" plt.savefig(saveto, dpi=600)\n",
" plt.show()\n",
"\n",
+ "# Load the wfdb record and the physical samples\n",
+ "record = wfdb.rdrecord('sample-data/100', sampfrom=0, sampto=10000, channels=[0])\n",
"\n",
- "recordname = 'sampledata/100'\n",
+ "# Use the gqrs algorithm to detect qrs locations in the first channel\n",
+ "qrs_inds = processing.gqrs_detect(sig=record.p_signal[:,0], fs=record.fs)\n",
"\n",
- "def gqrs_plot(recordname, t0=0, tf=10000):\n",
- " # Load the wfdb record and the physical samples\n",
- " record = wfdb.rdsamp(recordname, sampfrom=t0, sampto=tf, channels=[0])\n",
- " \n",
- " # Use the gqrs algorithm to find peaks in the first channel\n",
- " # The gqrs_detect argument expects a digital signal for the first argument.\n",
- " d_signal = record.adc()[:,0]\n",
- " peak_indices = wfdb.processing.gqrs_detect(d_signal, fs=record.fs, adcgain=record.adcgain[0], adczero=record.adczero[0], threshold=1.0)\n",
- " print('gqrs detected peak indices:', peak_indices)\n",
- " peaks_hr(x=record.p_signals, peak_indices=peak_indices, fs=record.fs, title=\"GQRS peak detection on sampledata/100\")\n",
+ "# Plot results\n",
+ "peaks_hr(sig=record.p_signal, peak_inds=qrs_inds, fs=record.fs,\n",
+ " title=\"GQRS peak detection on record 100\")\n",
" \n",
- " # Correct the peaks by applying constraints\n",
- " min_bpm = 20\n",
- " max_bpm = 230\n",
- " min_gap = record.fs*60/min_bpm\n",
- " max_gap = record.fs*60/max_bpm\n",
- " peak_indices = wfdb.processing.correct_peaks(d_signal, peak_indices=peak_indices, min_gap=min_gap, max_gap=max_gap, smooth_window=150)\n",
- " print('corrected gqrs detected peak indices:', sorted(peak_indices))\n",
- " peaks_hr(x=record.p_signals, peak_indices=sorted(peak_indices), fs=record.fs, title=\"Corrected GQRS peak detection on sampledata/100\")\n",
- "\n",
- "gqrs_plot(recordname)"
+ "# Correct the peaks shifting them to local maxima\n",
+ "min_bpm = 20\n",
+ "max_bpm = 230\n",
+ "#min_gap = record.fs * 60 / min_bpm\n",
+ "# Use the maximum possible bpm as the search radius\n",
+ "search_radius = int(record.fs * 60 / max_bpm)\n",
+ "corrected_peak_inds = processing.correct_peaks(record.p_signal[:,0], peak_inds=qrs_inds,\n",
+ " search_radius=search_radius, smooth_window_size=150)\n",
+ "\n",
+ "# Display results\n",
+ "print('Corrected gqrs detected peak indices:', sorted(corrected_peak_inds))\n",
+ "peaks_hr(sig=record.p_signal, peak_inds=sorted(corrected_peak_inds), fs=record.fs,\n",
+ "         title=\"Corrected GQRS peak detection on record 100\")\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Demo 20 - Use the xqrs detection algorithm and compare results to reference annotations\n",
+ "import wfdb\n",
+ "from wfdb import processing\n",
+ "sig, fields = wfdb.rdsamp('sample-data/100', channels=[0], sampto=15000)\n",
+ "ann_ref = wfdb.rdann('sample-data/100','atr', sampto=15000)\n",
+ "\n",
+ "# Run qrs detection on signal\n",
+ "xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])\n",
+ "xqrs.detect()\n",
+ "# Alternatively, use the gateway function to get the qrs indices directly\n",
+ "# qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs'])\n",
+ "\n",
+ "# Compare detected qrs complexes to reference annotation.\n",
+ "# Note, first sample in 100.atr is not a qrs.\n",
+ "comparitor = processing.compare_annotations(ref_sample=ann_ref.sample[1:],\n",
+ " test_sample=xqrs.qrs_inds,\n",
+ " window_width=int(0.1 * fields['fs']),\n",
+ " signal=sig[:,0])\n",
+ "\n",
+ "# Print and plot the results\n",
+ "comparitor.print_summary()\n",
+ "comparitor.plot(title='xqrs detected qrs vs reference annotations')"
]
},
{
diff --git a/devtests.ipynb b/devtests.ipynb
deleted file mode 100644
index cee09f77..00000000
--- a/devtests.ipynb
+++ /dev/null
@@ -1,79 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "
This script is for internal developer testing.
\n",
- "\n",
- "This is different from the unit tests located in the `tests` directory."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import wfdb"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "collapsed": false,
- "scrolled": true
- },
- "outputs": [
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1sAAAF5CAYAAABpxzARAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAIABJREFUeJzsnXmcHFW1+L+3qnqbnunMJAGigOwEUMBIQII+CbJJUFAE\ngQcugCKLiALPJ+YJCgLKpoi/iKLsAR6CCw9EFIQAsglCkC3sQUACSSbpmZ7pparu74/bNdOzd/d0\n91R3zvfzyWdmbm7VuVX31Kl76t57jtJaIwiCIAiCIAiCINQWa6obIAiCIAiCIAiC0IqIsyUIgiAI\ngiAIglAHxNkSBEEQBEEQBEGoA+JsCYIgCIIgCIIg1AFxtgRBEARBEARBEOqAOFuCIAiCIAiCIAh1\nQJwtQRAEQRAEQRCEOiDOliAIgiAIgiAIQh0QZ0sQBEEQBEEQBKEOiLMlCIIgCIIgCIJQB5rO2VJK\n/YdS6lal1JtKKV8pdUAZx8xXSj2ulMoqpV5QSn2xEW0VBEEQBEEQBGHdpemcLSAJPAmcCOiJKiul\nNgVuA+4GdgQuAX6llNq7fk0UBEEQBEEQBGFdR2k9ob8SWpRSPvBprfWt49T5EbCf1nqHkrIbgGla\n6wUNaKYgCIIgCIIgCOsgzTizVSm7AncNK7sTmDcFbREEQRAEQRAEYR1hXXC2ZgErhpWtAFJKqdgU\ntEcQBEEQBEEQhHUAZ6obMEWo4s9R11AqpWYA+wKvAdkGtUkQBEEQBEEQhPARBzYF7tRar6rkwHXB\n2Xob2GBY2fpAWmudH+OYfYHFdW2VIAiCIAiCIAjNxBHA9ZUcsC44Ww8B+w0r26dYPhavAfzkJz9h\nxx13BMDzPPL5PNFoFNu2xyyrpG4mk2H58uVssskmJJPJio6tRHat5YwluxFyRpNRLzmnn3465513\n3rjXUo/rEV0Lj64N14F6ySnnWkTXpk7XyrEFzWLXRNeq07V660A9rkd0rba6VksdqMf1iK7VV9eW\nLl3KN77xDSj6CJXQdM6WUioJbMngUsDNlVI7Aqu11v9SSp0HvFdrHeTSugz4WjEq4RXAnsDBwHiR\nCLMAO+64I/PnzwfAdV3S6TSpVArHccYsq6Rud3c30WiUuXPn0tXVVdGxlciutZyxZDdCzmgy6iUn\nmUyy2267jXst9bge0bXqdO2DH9S4rseTT1I3HaiXrpVzz0TXpk7XyrEFzWLXRNeq07V660A9rkd0\nrba6VksdqMf1iK7VV9dKqHh7UTMGyJgLPAE8jtlzdRHwD+D7xf+fBWwcVNZavwbsD+yFyc/1TeAY\nrfXwCIWCMCZLlsCLL051K4TxWLpU8cwzTff9SBAEQWgiFi+G4nd4QSiLphuZaK2XMI6TqLU+aoxj\ndqpnu4TWxhhWh1UVbYkUBEEQBKGV+PrXYfVq6O+f6pYIzUIzzmwJgiAIgiAIQsPZaivz89lnp7Yd\nQvMgzpYgjMEee+wBgB41QYCwLhDogLBuI3ogiA4IgQ50dJi/16xR49QWhEHE2RKEMQgMa2/vFDdE\nmJB6OcQywBJA9EBonA6k0xF8vyGihAoZrgNZycIqlEnT7dlqJJ7n4bruwO+lP8cqq6RuaVmlcqqV\nXQs55ciul5zRZNRbzurVLqWPSi36e7zrEV2rXNeMQ+zUVM5U6Np496xRckTX1g27JroWPl0rFDwO\nPng+p57axw9/KLpWSzm1lO37PmDR1+dX3e56XI/YtcboWjWIszUO+XyedDo9pCyTyYyoN1pZOXWz\nxc8i2Wy2ajnlyK6XnOFljZAznox6yXnrrQwwDQDXrb6/J5IjulZ+2XA5q1crwIR87e3NoNT4x5dT\nNhW6Vs49a5ScWuna6tWKaFTT3t4aulYPOaJrYtcAVq/OAXD33Y7oWp3k1ELXCgUPsFi7Nld1u8uR\nI3Zt8nJqqWv5fH7Uc5VDUzpbSqkTgdMwYd
6XAidprf8+Tv1vAMcB7wNWAjcDp2utc+PJiUajpFIp\nwHi0mUyGZDI5JOnZ8LJK6vrFtQLxeLxiOZXIrrWcsWQ3Qs5oMuotx7YHk/HlcjBtWnX9Xeu+aZSc\nZtC1UhsZiSRpa2tOXRvvnjVKTq11bcYMh+2287n//jUtoWuN0gHRtXXPrr35pvlp20p0LcS6FpRp\nHQPyTalrYtcqlxONRqmWpnO2lFKHYnJrHQs8ismbdadSamut9cpR6v8ncB7wJeAhYGvgasDHOGxj\nYtv2iGRm5ZaVUzfowEWL2jjkEIdtt62P7EDOZM5ZTlkj5Iwno15yXHewXj6vaiq7FvesUXLCrGul\nexzyeZtUqjl1rVb9XQs5tdS1Z5+1ai5b7JroWqvZtb6+YFCoRNfqJKcWupbLGXuWz4tdWxd1rRqa\nMUDGN4FfaK2v0Vo/j5mx6gOOHqP+POABrfX/aq1fLyYzvgHYpTHNHR+t4cwz2zjggKluiTAWuZL5\nT9kQG05KZ/cnsaxaqDGy0T+8eJ5EWg0bPT0S3a4ZCPJr5cZdGyUIgzSVs6WUimCSE98dlGmtNXAX\nxqkajQeBnZRSOxfPsTmwALi9vq0tj0zGeMwleweFkFHqYOVy8jIMI6XOljxL4aG7e6pbIIyF48Bx\nx011K4RSAmdr+J5TIVwEYwL5+CqUS1M5W8BMwAZWDCtfgdm/NQKt9Q3AmcADSqk88CJwj9b6R/Vs\naLmk0xHAvPiEcCIzW+FHZrbCycoRC7uFMPHLX051C4RSxNlqDgadLekooTxaZYivgFEXRCil5gPf\nwSw3fBTYEvipUurfWusfjHfShQsXMmPGDAC01riuyxFHHMGRRx5Zs4avXSvOVtiRma3wIzNb4UQ+\nTghC+fT1mfeLLL8NN4WC+Sn2rXW58cYbWbx4MY7joIpfP1atWlX1+ZptiL8S8IANhpWvz8jZroCz\ngGu01lcW/35GKdUO/AIY19k655xzmD9/PgCu65JOp4dEVKkFPT3G2ZrEvjuhzoizFX7E2Qonk4iU\nK9QRmf0NJ2K7mgNxtlqfww47jAULFpBKpQYCZNx7771VJzdvqmWEWusC8DiwZ1CmjMu5J2Zv1mi0\nYSIPluIXD53ykXM2a7wsMbLhpXQZoWyIDSeyjDCcyPMSTvr6proFwmgEg3gJXBJugvGa2DehXJpt\nZgvgYuBqpdTjDIZ+bwOuAlBKXQO8obX+TrH+/wHfVEo9CTwCbIWZ7fpDMbjGmEyUjboWGaoLBePv\nmmWKXkXHViNbMpJXXtbX52G2Cpo12pPp73Kup166Vq2cZtC1/n5F0Ef5vF9z2Y3StfHa3Sg5tZRd\n+uxU2+56XM+6btd6e/1x69XjesSuTSy7UDBDEtelZXStFe2a6SeF6+qq212P61nX7VqjdK0ams7Z\n0lrfpJSaiXGYNgCeBPbVWr9brLIRUDpPdDZmJutsYEPgXeBW4H8mkpXP5+uekTxwtnzfHyJrMpm1\nR5MT/JSM5JXLMVni2wDz5bHa/p5ITr11bbJywqxra9ZEgA4Aenv7Sae9cY8vp2wqdK2ce9YoObXQ\ntTVr+gj6pdp2lyNH7FplctasyY/4/2bXtVawa/39Qf4mn3S6ty5yxK5NXtcCvyGbdatudzlyxK5N\nXk4tdS0/iXXxTedsAWitFwGLxvi/jw/7O3C0zq5UTjQarXtG8ny+p3iURSqVKvvYSmRLRvLJyYE4\n7e2a3l6F61LTjOSTuWeNktMMuuY4gyuCI5EEqZRVl76px/WUe88aJaeWumbbbSV9Bh0dza9rjdKB\nesrx/fiAjGQy2RK61gp2DXJFeVbL6For2rVguWcwhG5GXWtFu1ZvXYtGo1RLUzpbjaKaDNOV1LVt\ne2Bmy3XViOzekpG8smupl5xCwSKZhN5e00+1lF2Le9YoOWHWtdLZfa2tptW1WvV3LeTUQtdct/Ql\n1xq6Vg
85jda1YK9wvWSLXauuzPfNl3PPUziOXfHx5ZSJXZucrlmWjesGUSNVzWWLXQu/rlVDUwXI\naEUCZ2vwS4kQNgoFaCt+oJdAJuFEohGGk9IN5JNY7i7UGMkPFE4KBdMvYsPCS6kdE5smlIs4W1NM\nPi/OVtgpdbakn8KJRCMMJ+IEhxMJyR9OgvdL4HQJ4aN0DCA2TSgXcbamGJnZCj+FAsSLWxw8T16C\nYaT0+RFnKzwMndmSZycslA4SJcx4eAhsl9iw8FL67IizJZRLUzpbSqkTlVKvKqX6lVIPK6V2nqD+\nNKXU/1NKvVU85nml1Cca1d7xGNyzNcUNEcakUIBIBBxHi1McUoa+AGVQHxZKnS2xceGh9BmRfgkP\nsoww/ATPjmVJPwnlM3LXWMhRSh0KXAQcy2CerTuVUltrrVeOUj8C3AW8DRwEvAVsAqxpWKPHQZYR\nhh/XBccxDpcY13BS2i/yVTg8yDLCcFL6vpF3T3gYXEY4te0QxiawY4mE2DShfJrO2cI4V7/QWl8D\noJQ6DtgfOBo4f5T6xwCdwK5a62AY9nojGloOsoww/AQzW8bZklmTMCJLO8KJzGyFk+HOlhKzFgoG\nlxFKh4SV4NmJx2U8IJRPUzlbxVmqnYBzgzKttVZK3QXMG+OwTwEPAYuUUgdikhpfD/yomINrTCbK\nRl2LDNWBs+V5UCi4+H7tsoKPViYZySsvy+d9HAccR1EoTK6/y7meeulatXKaQdfyeQWYsKyFgo/r\n6prKbpSujXfPGiWnlrKzWZ9gtbrnqZbQtXrIabSuFQqDG7VyOY94vPl1rRXsWj5v+sV1aRldazW7\nlssFOZ30wHPUjLrWinatUbpWDU3lbAEzMSOqFcPKVwCzxzhmc+DjwHXAfsBWmITINvCD8YTl8/m6\nZyQvFCIDf69alSbImTaZzNqjyQl+SkbyyuX09bkoBY7j4Lq1zUhei3vWKDlh1rVMJkEsFieXU/T2\nZkmnC+MeX07ZVOhaOfesUXJqoWuZTA5IAIz57DSbrtVDTqN1ra9v8PlYu7aPeLz5da0V7FouFwPM\ns9IqutZqds3YNIjF/AFnqxl1rRXtWr11LT+JMK7N5myNhQLGiqlkYZyxY7XWGnhCKbUhcBoTOFvR\naLTuGcldd3Byra0tRSw2uazgY8kByUherRxwiMchElG4rqppRvLJ3LNGyWkGXbNti3jcLFuLROKk\nUomm1LXx7lkz6pptxwaOd11aQtcapQP1lGMWiRgikTag+XWtFeya1sFXeUVHRwqlml/XWs2u2bYJ\nTZxIWOhiKM9m1LVWtGv11rVoMBtSBc3mbK0EPGCDYeXrM3K2K+DfQF7rIQFunwNmKaUcHVi3UTjj\njDOYMWMGAFprXNfliCOO4MgjjxxSr9oM1bZt43mDzdLaIdCJarJbjydnsucsp6wRcsaTUS85nmeR\nTEIkonHd+mQkr2V/10tOmHXN9yEWM8+S71s4jj3u8eWUTYWu1aq/ayGnFrrm+4MBbz1PtYSu1UNO\no3XN90sHXbWXLXaturLSj6/gUFq9WXWt1eya1samtbWpgT2pzahrrWjXatnfN954I4sXL8ZxHFRx\nU+uqVatGnKdcRrYgxGitC0qpx4E9gVsBlLkLewI/HeOwvwGHDyubDfx7PEcL4JxzzmH+/PmAWT+d\nTqeHeOe1wPMUjqNxXSVBMkLK0AAZU90aYTRctzQX2tS2RRhEEoCGE4lGGE6GB/qJRMauK0wNQXj+\neBz6+qa4MULdOOyww1iwYAGpVGrAAbv33nvZY489qjpfM+bZuhg4Vin1BaXUNsBlQBtwFYBS6hql\n1Lkl9X8OzFBKXaKU2koptT9wOvCzBrd7VFzXImFWPMlLL6QM5tkaNLRCuCgUGHiOZFAfHlyXgX2o\n0i/hQZzgcCJRVcPP0GiEU9sWoXloqpktAK31TUqpmcBZmOWETwL7aq3f
LVbZCHBL6r+hlNoH+DGw\nFHiz+PtoYeIbjucpEglNT4/MbIWVUmdLjGs4kUF9OAlmHPN56ZcwUTr7K++d8FD6MU/6JZxIni2h\nGprO2QLQWi/CRBQc7f8+PkrZI8Bu9W5XNXieIh4fDPcqhI9CwTha4myFl8DZUkrLMsIQUSgYZyud\nNrZOCAcyqA8nZumgT6FgybsmpAS5tWRmS6iEZlxG2FLIMsLwE6ydlz1b4cV1jTNs29JHYcJ1ZXln\nGJHlauHEfDTyBn4XwkfpzJZ82BPKRZytKSZYRgjibIUV2bMVfkqdLXkBhofSwCUyeAwPpX0hz0t4\nKBQgGjURCeV5CSfBOE2WEQqV0JTLCBtFJpOhu7sbMDH3s9ksvu8PhJkcraySuj09PXjedBzHBRy6\nu9OsWZMv69hKZPf09Az5Wck5K5HdCDmjyai3nFwuhecVUMoim/VYs6anqv6udd80Sk4z6FpfXxug\nsCyH3t5+ursLdembelxPufesUXJqqWv9/UkiEQtwyGRyrFmTb3pda5QO1FNOJlNAqThaK9au7aWv\nr6/pda0W7W6UnLFk53JtA87W6tVr6ejwm17XWs2u9fZmgQ4gS6EQHfHsNIuutaJdq7eujZUguRzE\n2RqH5cuXTyqJWTm47m54XgaI889/Pks22zPhMdWybNmyup270XIaeS2ZzHq8++6/6e+fRnd3gSee\n+Gdd5DSCVpXz7rvb09cXQalpvP76mzz22Os1l1FvWlHOypUJPC8CdPLaa2/wxBMr6yKnEbSSXXv3\n3W6i0SS5nM2yZS/R1tZdFzmt1DeNkNPb+2EiETOwe/LJp3n33frFFm+Ve9ZoOcuXvwWsx5o1/yaX\n24jnnnuuLnJa6b61yrUsX7686mPF2RqHTTbZhLlz5wKDXm48Hh/h+ZaWVVK3p6cH17Xo6moDYOut\nt+ODH8yXdWwlsnt6eli2bBmzZ8+mo6OjonNWIrsRckaTUW85lhVj443fwzvv2DiOx5w5c6rq71r3\nTaPkNIOudXQksW2IRCze856NmDt3/abUtfHuWTPqWjLZOZAraNasjZkzZ+Om17VG6UA95XR0TCeR\nMElZN9tsK7bdtqfpda0V7FokEsfzTKbcbbf9AFtv7Te9rrWaXVt//Q0B2HTTWWhts+222zalrrWi\nXau3ruXzeaqlaZ0tpdSJwGnALExI95O01n8v47jDgOuB32utDxqvbjKZpKurCxia1DhIcDZaWaV1\nPU+RTFpFeSk6O8s7thLZAR0dHRVfTyWyGymnVEa95XieRSqVoK3NJ5+Hzs7Oqvq71n3TKDnNoGuW\nBW1tPpYFsVicrq5kXfqmntcz0T1rlJxa6ppSEZKmK4hEYnR2Jppe1+opp1G6plSURMJizRpIJNpp\na/OaXtdawa55nkckYpYRtrdPo6ur+XWt1exaJGIi/nR1JfA8TVtbW1PqWiPltIquJYOXWRU0ZYAM\npdShwEXAmcAcjLN1ZzH/1njHbQJcANxX90aWSWnod082KoeSoQEypro1wmhIgIxwEoR+B2PrhHBQ\nGrhEnpfwYKIRGmdL+iWcSIAMoRqa0tkCvgn8Qmt9jdb6eeA4oA84eqwDlFIWcB1wBvBqQ1pZBq6r\nJFpXyAmcLRP6XQaMYSTIhSbOVrhwXYjFzO/SL+FB+iWcuCWh36VfwkkwTovFpI+E8mk6Z0spFQF2\nAu4OyrTWGrgLmDfOoWcC72itr6xvCyvD8yyZ2Qo5ktQ4/AQzW5YlMyhhQgb14aRQUDKzFUIKBSWh\n30NO8K6JREBrhe9PdYuEZqDiPVtKqela69X1aEyZzARsYMWw8hXA7NEOUEp9BDgK2LG+Tascz1MD\ngxExruHEdWUZYdhxXTOrZdsa3xdnKyzIcrVw4kr+s1AiywjDT6GgiETM+wbk+RHKo2JnC3hLKfV7\n4Nda67/UukGTQAF6RKFS7cC1wFe0
1hXFt124cCEzZswAQGuN67occcQRHHnkkbVoL2CWpcViMrMV\nVrQ2/SLLCMPN0JmtqW6NEFAoDM5syRfg8CBOcDgxH/bE2QozwbsmiLkgzlZrcuONN7J48WIcx0Ep\nM+5btWpV1eerxtn6CvAl4E9KqTeAK4CrtNbVB6CvjJWAB2wwrHx9Rs52AWwBbAL8nwruWHH5pFIq\nD8zWWo+6h+uss85i9913B0yUoEwmQzKZxC0+XZ7nDfkZMFr5aGWu6+F51sCXrFzOK/vYamWP1/Za\nyq6XnNFk1FNONmt+WpaHbRvDWm1/17pvGiWnGXStULCxbV3sIx/X9Ss6Zxh0baJ71ig5tdU1jW1r\nlFJ4nmoJXauHnEbrmnGCfcCiUPBH1KvH9Yhdm1h2oWANOFv5vDvkfdOsutZqdi2f10QiGqV8wB4x\nJmgWXWtFu1ZLXTvkkENYsGAByWRyIPT7kiVL2GuvvaiGip0trfW1wLVKqc0wTteXgO8qpe4BfgX8\nTmtdfTD6ieUXlFKPA3sCtwIUnag9gZ+OcshzwPbDys4B2oGvA/8aS1Y+nyedTg8pGy2D9FhZpSeq\nm8lkAbBtszatp6ePTKZQkZxyZGez2YGf1V5POWWNkDOejHrI6e01OU8KhX7AwXWdqvt7PDmTuWeN\nkhNmXcvnU2jtYlkRstkC6XT/pOU0WtfKvWeNklMLXcvnfXw/j23H8bzW0LV6yGm0ruVyPsmkC0Tp\n68uNeexk5Yhdq6ysUOgc+PiaTveRTrtj1q22TOzaZHXNxbY1+Xwf0IHnqabUtVa0a/XWtSnJs1Wc\nDToTOFMptRdmT9QVwCKl1GKt9derbtXEXAxcXXS6HsVEJ2wDrgJQSl0DvKG1/k7R8Xu29GCl1Bpz\nCXrc1N/RaJRUKgUYjzeY2SpNeja8rJK6mUyQT8N0QyzWRjLplnVsJbL94vqdeDxe8fVUIrsRckaT\nUU85tm3W2qRSCRIJTaGgqu7vWvdNo+Q0g675vk0iYdbRW1aEVCrSdLo20T1rRl3zPItkMjYQJbIV\ndK1ROlBPOb5v0dFh3juOEwPyTa9rrWDXPG8wQEYsliSV0k2va61m1yBCNKpIpdqKdcWuVXItjZJT\nD12LRqNUS9XOVila67uAu5RSnwV+CZyImTWqC1rrm4o5tc7CLCd8EthXa/1uscpGwKRX0tq2PSJp\nWrll5dT1fdOBbW1mdaPWZilUrWUHijKZc5ZT1gg548mohxytzc943CYa9fE8XVPZtbhnjZITZl3z\nPIhGLSzLR2sLx7EqPmcl11LL66n0njVKTi10zXUV0ajCBC5pDV2rh5xG65oJzGSeEd+3ai5b7Frl\nZVqb5Z2BswU2pdWbVddaza75viISUcRi5m8TnKm5dK1RclpV16phZAsqpJgo+Cjgi8DGwD3Aryd7\n3onQWi8CFo3xfx+f4Nij6tKoCgmCLUho5PASRB8MAmRINMJwIkmNw8nQfpHgMmEhiKjmOPK8hIWg\nHyTPVrgpFBSOMxggQ+yaUA5VOVtKqRjwWUwS4fnAm5glfFdqrV+rUdtansHkeHrI30J4CPrEDEzM\nMsJRgl4KU4w4W+EkSJsg/RIu5HkJH4PvGolOHGYKBST0u1AxFTtbSqlFwGGYPVJ/APYH/lxMLCxU\nQDBLkkiIcQ0rxrkaTGIofRROgsTTEvo9XAT9IoP6cCFOcPgIxgMysxVuPE9CvwuVU83M1keB7wPX\nTnFy46YnWEYYiYBS8tCGkdJlhI4jywjDivlSr2XwGDJkBiWclCZmlX4JB8H7P9izJeOBcFK6BBek\nn4TyqNjZ0lrvUI+GrIsED6kMRsLL0GWEktQ4rAwmNdaSPDdElM6gSL+EB3GCw8fghz1JahxmTBAT\n2bMlVEbVATKKua0OBvbAJBQeEv5La33Q5JrW+gTGVb7Ih5fS2cdIRL5ihRUZPIaTocsIZVASFgIn\n
WAJkhIfBZYTibIWZYM/WoLM1te0RmoPJRCP8CfBVTPTBFTQ4aoBS6kTgNGAWsBQ4SWv99zHqfhn4\nAvCBYtHjwHfGqh8wUTbqyWaozuWCXE4+jqPJ5fyyj61WtmQkr6ws6CMwiQxd16q6v2vdN42S0wy6\n5ro2lmU+WriurrnsRujaRPesUXJqJbtQ8PB9UMrDsiw8rzV0rR5yGq1rhQJYlo9tKwoFPaJePa5H\n7Nr4ZSYvqzMws5XPe7iubnpdazW7ls+D4/iADzi4bvPpWqPktKquVcNknK3PAwdprf84iXNUhVLq\nUOAi4FgGkxrfqZTaWmu9cpRDdgeuBx4EssC3gT8rpbbTWv97LDn5fL6uGcn7+synLN/PYVmavr4c\nmUy2IjnlyJaM5NXLyWRMxvBcrhffd/C8CL29GdQoH+lrfT211LV69U2j5Eyka67bVXyOouTzLul0\nZtzjyylrtK6Ve88aJWeyuhbYMtftx7ISeF5r6Fo95DRa11wXfD+LUnGy2fyYx05Wjti18su6uy2g\nc2Bmq7e3n3Q6X3M5Ytcmq2selqXIZvuBabhu8+lao+S0mq7l8/kR/18uk3G21gKvTOL4yfBN4Bda\n62sAlFLHYaIiHg2cP7yy1vrzpX8XZ7o+C+wJXDeWkGg0WteM5JbVC0BbW5RIROE4MZJJu+rs1mOV\nS0by6uVYlkmC1tXVTlubLupFkkSiNhnJJ3PPGiWnGXTNdSGZjGJZoJRTE9mN1rWJ7lmz6ZptxwHo\n6EjgOArfpyV0rVE6UE85rqtob48RiSgsKwpkm1rXWsGuBfk2A2crEkmQSsWbXtdaza75vk0i4TBt\nWrJYVzWdrjVKTqvpWjQapVom42x9DzhTKXW01rp/EuepCKVUBNgJODco01prpdRdwLwyT5MEIsC4\n0RSryTBdSV3fD4yshW0rtLaxbV1z2YGiTOac5ZQ1Qs54Muohx/fNVsREwiEWM1PIWtc+I3m9da1e\nfdMoOePpmm07xT0oFrbtobXCceyKz1nJtdTyeiq9Z42SM1ld0zqwbzaOo/G85te1VrFrhYIiGrWw\n7UEb18y61ip2zZT5WJYGbEr/q1l1rdXsmutatLdbxONW8e/m1LVWtGuN0rVqGNmC8rkJOBx4Ryn1\nGjAkKLbW+kOTOPd4zARszD6xUlYAs8s8x48wiZjvqmG7KmZ4WHEJvhA+SvsoEhlaJoSDIMqd44Bt\nwySWVQs1JAiIMdgvEiAjLHie5NkKG8F7xbYlYFaYGR4gQ8ZtQjlMxtm6GjPDdB1TECBjFFQ5bVBK\nfRv4HLADtJtDAAAgAElEQVS71nrcBZgLFy5kxowZAGhtNt0fccQRHHnkkbVor4R+bwKCaISOI8Y1\nrMhzFE5K0yZIsulwUTpglH4JBxKduDkYGY1QPiK1IjfeeCOLFy/GcRxUcZP+qlWrqj7fZJyt/YF9\ntdYPTOIc1bAS8IANhpWvz8jZriEopU4DvgXsqbV+ZiJB55xzDvPnzwfAdV3S6fSQdaeTZXCQKMY1\nrMjMVvgZHKTIoD5MlPaL2LfwoLVZRij9Ei5KxwPiBIeXfF4Ny7M1te0R6sNhhx3GggULSKVSA8sI\n7733XvbYY4+qzmdNXGVM/gWMDC9SZ7TWBUzo9j2DsmLOrz0x0QZHRSn1X8BCjIP4RL3bWQ6FwmAO\nJ1lGGE6GL/UE6aewUTqzJc5WeBi+jFCSGoeDoB9kGWG4KF1GaFla3jMhpTRRe/C3IEzEZJytU4Hz\nlVKb1qYpFXExcKxS6gtKqW2Ay4A24CoApdQ1SqmBABpKqW8BZ2OiFb6ulNqg+C/Z+KYPIsufwk8w\nYJSZrfAy9DnSMqgPCaXLCMW+hQfPM6/94L0jg8VwEPSDbfvyvISYQoEhM1vy/AjlMJllhNdhHJyX\nlVJ9jAyQMX0yDRsPrfVNSqmZwFmY5YRPYmas3i1W2QgofQSOx0
QfvHnYqb5fPMeUMHzZgDy04aN0\nKZTj6CFlQjiQQX04GbmMUPY2hIFgH6o8L+FCAmQ0B4WCkj1bQsVMxtn6JlMYFENrvQhYNMb/fXzY\n35tVI2OibNSTzVCdy5nbZ1kelqUpFCaXLb6cupKRvNJs8WaaRGsXy/IHyuohu566Vuu+aZSccmRn\nsy7gFJ8j43zVWnYjdG2ie9YoObWSHTw74GJZFp7X/LrWCnYtGBxalodtW7iunvC6a3E9YtfGL8tm\nFSbcu8ayoFDwcN3JjQmmWteGy2kFu1YomNlH8AGTdqTZdK1RclpV16qhYmdLKfVxYInW+qqqpTYJ\n+Xy+rhnJ+4vZyTwvi1IefX0umUxfRXLKkS0ZyauX09fnEoloenrSuK4NREmn+0mnRz50tb6eWupa\nvfqmUXLG07Xu7l6gk3w+i23HKBQ80umeSctptK6Ve88aJWeyutbXZ4K95vMZlGrD95tf11rBrgUz\nW/l8HxAnl/PHPHYycoKfYtfKK0unI0BHcRmhT19fgXS6v+zjyy0TuzY5XcvlNFrn6evrB6bjus2n\na42S02q6ls+PG8B8XKqZ2foV0KmU+hPwB+AOrXXDA2U0gmg0WueM5Fls26etLU40amPbFsmkqjq7\n9dhyJCN5tXIgQiQCqVSKjg5TFokkSKWsmsiezD1rlJyw61os1g5AKhUfSAZaC9mN1rWJ7lmj5NRK\n12w7BsC0aUkiEQvP85te11rBrgVJjKdNayMatVDKyG5mXWsFuxaJGCfYtjWOo7DtGKlUpKl1rZx7\n1my6ZpIaR+nqihTr0nS61ig5raZr0WiUaqnY2dJab66U2gE4ABMk4yql1APArcCtWuvlVbcmZFST\nYbqSuoWCRSTiF8sUvq8GOrWWsmtxznLKGiFnPBn1kOP7FpGIwnEcYrFgetqqeUbyeutavfqmUXLG\n0zXfN+XxuIVtu/i+akpdq2V/10LOZHUtCMQQjzs4jo/nKWx75LPTTLrWCnYtmNmKx20cB3xfjXls\nPa5H7NroZYPJ2c2eLa0tHMcq+/hyy8SuTU7XXNc8O9EoWJYu2rXm0rVGyWlVXauGqqIRaq2f0lr/\nQGu9C7AFcAuwH/C8UupJpdRZSqm5VbdqHcEkxzPr5R3HfCERwoXrmnw0INGHwkqwsTwaBduW5ygs\nBIN66ZdwUZqoXfolPEjezeYgCJAB5vmR8YBQDpMJ/Q6A1votrfVlWusFwEzgB8CmwJ+UUt+Z7PnH\nQil1olLqVaVUv1LqYaXUzhPUP0Qp9Vyx/lKl1H71alu55HKKSCRYciPGNYwE2eJh8GcwWBHCQbCM\nOhKRPFthorRfxL6Fh9J0FtIv4SH4aGRZWuxYiCkdEziOOFtCeUza2SpFa53RWt+stf4CsD5weS3P\nH6CUOhS4CDgTmAMsBe4shoMfrf484Ppiez4I/B74vVJqu3q0r1zyeYY4W/LQho/RnC0J/R4uSme2\nZJASHobPOEr+s3BQmmdLVlSEh0LBzGopJf0SZvJ5Y9Mg6Cf5+CpMzMiFjBOglPp6GdW01vpS4N0J\na1bHN4FfaK2vKbbpOGB/TNLi80epfzImkMfFxb/PVErtA3wNOKFObZwQM7NlLKoY13DiumrIVyxT\nNnXtEUYyfAZFBvXhoFCQZYRhRPJshRPXHXzH2LaW90wI8TzQWsnMllAxFTtbGEdnIjRwaRXnnhCl\nVATYCTh3QJjWWil1FzBvjMPmYWbCSrkTOLAebSwXmdkKP6UvQJnZCieBsyUzKOFieL/IF+BwMHwZ\nYTFqsjDFmPGA+V2c4HASzAqXzmzJuE0oh4qdrWoTBNeQmYANrBhWvgKYPcYxs8aoP6u2TauM0j1b\nMrMVToavzwYxrmFj6HI1Lc9RSAhmtmQGJVwEzpYEyAgXhQJEoyZglvRLOCm1aSDjNqF8qpnZCisK\nM6NWs/qZTIbu7m7AxNzPZr
P4vl8S2nhkWSV1M5kIkYhPT08Pvp8kl1OsWbO2rGMrkd3T0zPkZyXn\nrER2I+SMJqOecvr6CliWQ3d3D5mMB8xk7doM3d1eTWRP5p41Sk7YdW316gjQTl/fWnw/Qj4fpbs7\nXZe+qcf1lHvPGiWndrqWBZJkMt14XoJCQbFmTU9T61ojdaBeclzXfJ3v61uL5yXI5aCvr6/Jda35\n7dratXEcx4zitfbo6/Po7u5ral0r5541Sk4tdC2Y2crleunuLqBUimzWZc2avqbStUbJaTVdGytB\ncjnUa88WWuufVt6cslgJeMAGw8rXZ+TsVcDbFdYH4PTTTyeZTA4p22OPPdhjjz3Kbux4rFy5PZFI\nhGXLlpFOx8hmbZ544omanHs0li1bVrdzN1pOo65l1aq15PM+jz32GPm8BezJSy8t57HH3q6pnFbq\nm0bLWbZsA2AHnn32SVav3pz+/giPPfZYTWU0glaT869/rcCypvPEE4+xZs376e1N1MW+tdJ9a4QM\nz5sOwHPPPcXatVuydm2M5557ri6yWqlv6i3n9de3AN4DQKHQxzvv9PPYY8/UTV4r3LNGyykUzPrB\n5ctf5LHHVuJ5H2XFipU88cTLNZfVSvetGa/lnnvu4Z577hlS1lBni/L3bNXF2dJaF5RSjwN7YhIp\no5RSxb/HkvnQKP+/d7F8TM477zx22203YNDLjcfjIzzf0rJK6sbjMfr6+pg9ezYzZ3bR3a2YM2dO\nWcdWIrunp4dly5Yxe/ZsOjo6KjpnJbIbIWc0GfWU09bWSVdXjLlz55LPm9msDTfclLlzN6qJ7Mnc\ns0bJCbuuvfTSDADmzt2Ru++O4jhR5s6d23S6NtE9azZdmz59FtEozJ07l/XWSwzYt2bWtVawa48+\nuhKAOXO25/bbE/T3W2y77bZNrWutYNduvTVBMml+b29P0NnZxty5c5ta11rNrt1//3IAtt12S+bO\n3ZS2tiidnesxZ06qqXStUXKaWdfmzJnDCSecMKTswQcf5JOf/CTV0Ix7tgAuBq4uOl2PYhzANuAq\nAKXUNcAbWusgz9clwBKl1CnA7cDhmCAbXxlPSDKZpKurCwDXdUmn06RSqYFs0qOVVVLX9wtEIj4d\nHR0kElHWrIHOzs6yjq1EdkBHR0fF11OJ7EbKKZVRTzmeF6W9PUJXVxeu66KUJhJpo6vLronsydyz\nRskJu65FImb2eb31OonHC4BVs2scfi31vJ6J7lmj5NRK1ywrTiSi6OrqIpHw0dqns7OzqXWtFeya\n560GzPOSSABo2tramlrXWsGuWRbEYuaDXjRqY9sRurpiTa1r5dyzRsmpha4FkTxnzOigqwtiMY1S\nDp2diabStUbLaRVdG77SrRJqmmcLQCm1kVLql7U+byla65uAU4GzgCeAHYB9tdZBqPmNKAl+obV+\nCONgHQs8CRwEHKi1frae7ZyI0miEstEynGSzEI8P/i3Rh8JHEMREKQmQESZcdzBql22LfQsLEiAj\nnJQGyJB8geEk2O9YGjUycMAEYTzqESBjBnAMxrGpG1rrRcCiMf7v46OU3QLcUs82VUouB+3tEvo9\nzGSzipklqbIjEQn9HjZKQyYrJYOUsJDPqyHOloTkDweS/yyc5POlebakX8JIaY46kI+vQvnUfGZL\nKJ98XkK/h51cjuJSG4PMnIQP80XY/C6DlPAgeYPCSaFgXvuxmPRLmMjnB2e2HEfeM2EkmNmS941Q\nKeJsTSG5nCQ1Dju5nBqyjFBmtsKHDOrDSaGgZFASQgJnKxKRj3xhwjhb5ncZD4ST0Wa25PkRykGc\nrSnEzGxJEsMwM3zPljhb4SOXM1/pQZarhYmRM46ytyEMFApWcWO/vHfCROlHI9mzFU4CZyuwa2YZ\nodg1YWIq3rOllPrtBFU6q2xLKHn4YfOzGEm6ppTObMkXknCSzQ6d2bJtLcY1ZAx1tmT5TVgI
ApeA\nOMFholCwZMYxhJQuI7Rt86FPCBfDA2TIni2hXKoJkLG2jP+/porzhg7P85g3z/yezXoDZQArVsDa\ntR6zZg2WlR5X+nOssmDPlud5KOXjuqrsY0crK6euW7QMk5FTjux6yRlNRj3lZLMQi/m4rumnSMQh\nlzN/1+t6atnftZATdl3r6/OJx1XxOTKDx1rLboSuTXTPGiWnVrLNs6Nx3aBfVFXtrsf1rMt2LZjZ\nMv1iDXw8amZdawW7ls9bJBJBNEKN6w6+d2opR+xa9bKDZ0UpF9cF27bwvObTtUbJaVVdq4aKnS2t\n9VFVS6sBSqku4GfAJwEfE2HwZK31qKmdi/W/D+wDbAysBH4PfFdrnR5b0o+59NItBv66//4sH/rQ\nYAbpnXbqZMUKh1WrMmNmlR6tvLQsl5tGJOKTzWbx/RyFQnTg/yc6dryy4eXZ4ieybDZLOp0es95k\nyxohZzwZ9ZEDkCWdNn87zjSy2QLpdH+N5VR/zxolJ6y61tOTx3EcMpkMth3D86iJ7MbrWnn3rFFy\nJqtrfX0ejuOSTvfg+wk8L9r0utYKds3MbPmk02lcN4HrRsc8djJygp9i18or6+/voL3dDBSVcsnn\nLdLpnprLEbtWva4FM1u5XA/ptEapdlxXkcn0Vn0to8kJfopdm7ycWupaPp8f9VzlUM0ywivKqKa1\n1sdU0Z5yuB7YANgTiGISGf8COHKM+u8F3gOcAjwHbFKs/x7gc2OL2YulS2fygQ9onn5asffe07jy\nyh5WrEgwa5ZixQrz0K1erdh447YRGaozmQzJZHJIhurhZcHMVjweJ5GI4fuKZDJZ1rGjlY1V7hfX\n78TjcVKpVEXnrER2I+SMJqOecnI5xbRpMVKpKJ7n4TgaiJBKRWoiezL3rFFywq5rWkdJJFSxjovn\nURPZjda1ie5Zs+ma6zokkzapVIp4HDxPNb2utYJdKxQKxGKKVCpFMmnh++ZrfTPrWivYNc+zicfN\nl/NIxEZrh1Qq1dS6Vs49ayZdy+XMuG/99Ttob4dYTOG6XtPpWqPktJquRYP111VQzTLCLwHLMcmE\nG7p5RSm1DbAvsJPW+oli2UnA7Uqp07TWbw8/Rmv9DHBISdGrSqmFwLVKKUtrPcZOgu258sp7mD9/\nPg8/DPPmwdFHt6P10Et+6SWbTTe1R83Mbdsjy0vL8nlNJOJj2zbRqFWclrbLOna8suHltThnOWWN\nkDOejHrIyeUUyaQ1kP8kEgHPs3CckbFlan091fZ3veSEVdfM8hvzu227aK2wbQelKjtnJddSy+up\n9J41Ss5kdS2fV8Tj5lmJRDx8v/l1rRXsmllGCI7jFO2ZHvPYelyP2LXRy8yerSDhtML31Qg9bDZd\nq+TYRsmZjK4FkTzb2x0cBxzHJ5drPl1rlJxW1bVqGNmCibkMOAzYHLgCuE5rvbrqFlTGPKA7cLSK\n3AVo4MPAH8o8TyeQHtvRGsquu8JDD7nMmzfydr32WnUBHX3fhEYOAmREIrLRMmx4nsJ11bA8WxKN\nMGyUBsiwLDNwNAP7KWyUQDar6Ogwv9s2TGK5u1BDgj1bIP0SJkzkW+mXMJPP29i2xnECpxj6+qa4\nUUJTULGnoLU+AbME70fAp4B/KaVuUkrtq5Sq90zXLOCdYe3xgNXF/5sQpdRM4H8wSwnLZu5cOP30\nPubPH/TP1ltP89JL1Y3ogqWfgbMVjQ6WCdXz0ku1e0nl8+bxkNDv4Waos2V+ykBl6snnh4bklz4J\nBxKNsD5cdBE8/XT1x/f3MxAgQ/qlNrz++tAoqPk8vPZa9efL5awhH18dR0K/C+VRzcwWWusccANw\ng1JqE8zSwkVARCm1ndZ65G7BcVBKnQf893gigW3HO0WxzkRyOoDbgacxQTPGZeHChcyYMcM0QGtc\n1+WYY47gqquOxPPg9NM1f/1rZIKzjE4uZ34GebbE2Zo8
d98d4XOfczjpJPjpT03Zv/+t+PSnbX7y\nE/jQhyo732jOluNomYGsAbW8h9kszJxpfg9ms2SgMvWUpk2QwWN4yOcHZ7bMYHGKG9QCrFwJp50G\nG21ks3Rpdefo7x981ziOpLCYLC+8YDFvnsOWW8Lzzxun65hj2vnjH50hOc0qYfDZMQ7WumbXCgV4\n7jn41rcsLr5YUbINq+W48cYbWbx4MY7jEMwjrVq1qurzVeVsDUMX/ymqT5J8IXDlBHVeAd4G1i8t\nVErZQBewYryDlVLtwJ3AGuCg4ozYuJxzzjnMnz8fMKGk0+k0qVSKYEnnhz+suekmh5UrXWaVNa82\nyKCzJTNbteLXvzaf0S+9FJSC44+HCy9McP/9ir/+tVbOlgxOJstdd8Heezs89pjFjjuasttvh2XL\nFF/6UuXnG57UGNatF2BYyecHnx2TpFW+AIeB0pktmamvDN+HU0+Ft96CBQsU++9vyu+7z/x84w2F\nnvCz7+gYZysI/S7vmcnw5ptwwgntgFntsu224Lo2r75qBm9Ll1aXOzXY7xiwLo0H3ngDNtsMZs2C\nt9+2uPDCBL/85VS3qn4cdthhLFiwoDjmN3pz7733sscee1R1vqqcI6VUTCl1uFLqL8AyYHvga8D7\nKp3VAtBar9JavzDBPxd4COhUSs0pOXxPjKP3yDjt7QD+DPQDB2ita+LWbLml+Tl3buVLCfuLkcOj\nUa/4s/mcrXPPVfzXf5lrmerB7QsvwJ13Rjn9dOO8/vSn8P7321x1lRntvT0idMrEBM7WyGUDk27u\nOs1PfmJ+3nxzlMWLzeDkk5+EU0+1eeONyk3SaM6WJNCdPFpPbj9CLqeG9Iv0SWVcdhkcfrjFW28p\nbryxdo5qPm8NDOrF2aqMX/0qxsUXw403whe+YHPqqW1stZXNJZcM1qnGhkGwZ8v8Lv1SPT09sP/+\nNq+8YnHVVR6WBS++CK++OvgMPfNMdefO5eyBpZ6w7iwjvP76KOedZ/Q6GEtde22Mf/1rChvVZFQ8\ns6WUWoQJkPE6ZjbqMK119XNrFaC1fl4pdSdwuVLqeEzo90uBG4JIhEqp9wJ3A5/XWj9WnNH6CxAH\njsA4a8Ep3x0vSMZECdJ23tkDbN58U5HJuAMDi3ISpK1dC+CQSHh4nodleWhtk8+HPyFjd7fHHXdE\nOPNMM7K98EJTfvTRcebPj9dMTrnX4vtwwQWmT7/61QI77uiw9daar37V4u9/V8yapVm+XFecIDKf\nN9fnOCaBoed5RCKKfF7XJUHfupD8s7cXHnjABhQ//GEbAEqZ5wjgmWdsZs+uTHZ/vyYa1cXnyLwI\nc7nBPqv2Gke7lsnet8n0TT3l/OMfPldd5XPJJT6+b8ouvdR8xV+71qWtrXI52SxEoyYxq2VpPM8K\ntV0rR3Yj7drxxzuAxc03dwGwzz4unZ3wve/Bzjs7LFhQnZxcziYe93FdF8tSFAo2WjdO15rVrmkN\nl15qXvLJpCaTUQMf8157Df7jPzT336949VWLbbapTLbWkM06Ax9fHcenUDCJpxuha7U853hyGmHX\nfvtbxTPP2Pztb2vZaacE992nuOKKoQ7wSy8Zu1SpnGAZYXA9lqWGvGcqOV+511PL+zOW7PHkPPOM\nz0kntQ859sgjPX73O4u5c23+/neX971v3dK1aqhmGeFxGEfrVWB3YPfR4mJorQ+qulXj85+YpMZ3\nYZIa3wycXPL/EWBroK34907AzsXfXyr+DPZ4bYa5llHJ5/PjJkiLxeDuu7Pss0+Kiy7Kc/vtEY45\nJsfBB+dH1B1+/IoVNjCNRMIjm83ief1AO93dGRKJcCdkPPzwdu6+u2NEnSuuSHDFFf/BJZesJpvN\n8fnP57j66hif/ayipydDMjkYwKBa2aNdy5IlDlddZRYPt7dn2HtvU/f3v4cVKyzOPz/BSy/ZQ669\nHDnBzJbnZUing5dg
O7mcRzpdfRLDWvdNo+RMVtfefjvDSSe1k83aLFrUywUXJHj1VZsvfnFwZnj5\ncqss2aVy+vt9lMqTyfRjWWYh/po1Pdi2HvP4cspaLSHjeHI+9SmHd96x+NSnetlxR5d4HC6+2Nj1\nJUv6+MhH3FHPOZ4cs1TaJAQvFGJ4nhNquzZW2VQk/+ztBZg+pP4GGzjMm1fgoYcifOADbey+e+U6\naexaO5GIRzrdh+dFgXZct3kTzU5WTjmyr7zS4lvfMi+W3/0uzcc+5nL99dEhg9Ajjshw//3tvPlm\neTastMysdJmO45ixg1Iu2ayu6J1Vblmr2rXu7iwf/rDilVdsNtvMY5ttfDKZDD/8IZx4osWZZ7bx\nz3/abLCBz/PP+6TTmTHPOZacfD5ICG7e/1q3tbRd+/e/FZdfPriPYostPF5+2Wa77bIcdZTL4Yd3\ncM45Lhdc0Dfi2Ilk1PJ6WjKpMXANZQSjqBda6zWMncAYrfVygs/k5u8lpX9XQjQanTDZ2wc/mOEz\nn/E580zj261a5XDkkRGy2QwPPdTOe95jscMOI48PHNR43CMejzNtmlmrFokkgfAmZMxm4b77zP/N\nmqX561897r1X4Tjwu9+53HFHjJNPNoOERx5J8NvfWtx/f4Q//jHKvvv6nHqqZrfdXHK52iSvu+oq\nxXe/a5yiW25J094+WDeZ9IjHM2y6qc3999sVJYj0fX/A2ZoxI0kqZeo5jvkCWasEfetK8s/e3gyH\nHtrJI49YnHeex9FHRzj00LVceGGK885z+NCHNPm85tZbo3zjGw6OU35CxnzeYtq0GMmkhW0bY9jW\n1jHQZ7Xsm8net8n0TT3kvP22xx/+UOCdd4yuL1hgyk84oZ833zTHLV2aZL/9dMVycjlFKmUSgicS\nGs9TtLUlx+3bqbJrK1fCCScozj47zZZbmiT1775rlsxsvHFjk3++8UaKs882/fGXv+R56qkCl12W\n4MUXLR56yHxMePVVm1gsSSxWuZxcziaZtEilUqRS5j1UKEBnZ3Mmmq1l34xWns/Dueea/pgzp8D+\n+7ehFBx9tEcy2cPee0d59FGHT30qzsKFmh//OMGXvxypSHbwwby93fRvImHjeaqpkhqvWJFhyZIO\nCgWLX/3K4lOfipJOz2L27MbYtRdfbOeVV0zZkUeaIWpQr6sLfvMbj0xmLf/zP9P4618dUqnK5eTz\neRIJNXA9iQR4HjXTtXrotOd5PPhgll/+sp0rr9T84x/wvvdBPD5Uztq1ZoXDrFkZTj55GptvDr/+\ntdH7HXd0uf9+j3jc5g9/8NhrLwffz7Hvvponn4yRSjnrxDu0oUmNtdZfqlpak1Fu0rPPfx5uuQV2\n2AGWLlVccIHDW2+18ctfmo75zGfg8cdt/v73zMDxwZ6teNzDtm0SiaCDbRwnnEnyXNdh8WLzYr7t\ntjTz5rUxfbrDtsU4kZ/9bJq//OVhXnhhZ84+u43f/tY8qH/8o7kPd95pceedADZvv23uxQ03OLz1\nFnzhC2bfmm3brFjhsOGGI9vT0wOZjM3112/GwoWd3H23w1e+YuqcdZbH/Pkutt02ou3ve5/izTcV\nL7zgsPXW5V23ecma9nd0OANBUWIxj3SapkzIOLysrw8yGUil6q9rDz7o8MgjFj/6EZxyik3wvebj\nH1ecfz6ccoqirc3noIMi/P73HocdZo8pY7icbFbR1qawbZvApio12GeVXE+5fVPtOSuVU0vZw+Vo\nDZ/+tM3jj8fYfHPNT36iOOAAc9yiRQna2zVz5yoeecSu6F4GcrJZRXu7SQgeiZjRpFLhsmsrV9pc\nconDAw/AQw+B47Tx+ONRzjxT8cUvmjpPPeVw442bstNO9U3+uWpVlCVLYhx8sCk7/3yYP9/iQx/K\n8aUvxViyxOKgg6CzU7NmjWLpUpvddqtcTi5n0dZmEuYGe4RcV9VV16o530R1XRcOP3
waqdTW/OpX\n9Utq/Kc/wZo1cMYZSznuuPcRiXQN1DnwwAKpVIKDDjLXe8ABPldeaXPHHebZKld2sD8rmTTObyym\nyOfDndT4vvscXnvN5umn4ZFHbKZPV9xxx2CIv4ceage2J5XqY4MNHHbYAT74wbHPN9nreeqpKLGY\n2Z81a5bZuzX8WKVgjz3gsssUzz3nsP32lcnJ523i8cH3fyTi47o6lOO1oOzqqxVf/vI0wOw1DLCs\nGey6647Mnt3BkiUOr7xiyufPV9x7r8U99wzuUZ8zxyWRcHAch4MPBtfVpNOwyy5www0K1x18364L\n79BqqGZmSxjGggWDYVq32AK+9z2b0sm03/0OQPGHP0Q56ihTFsxMJhLmwMBhLs1NEyYyGfNgvfAC\nHHigz7x57oiwn0rB9Ol5vva1HJ/4RBu77ALnnefx8MMuRx3lDHn5zJo1nUsv9fj618169W9/2+Hg\ng5OceKIxhjffDNGoYrfdTP3ubpg+HUzgSfOyO/xwc99eftnMso0ySw3A7rubgf3PfjYYEr4cgj1b\npXynhK4AACAASURBVNEI29o0fX3h3xDb2ws33aQ45BD4wQ/g/e+P8Prr6xON2nzsY2Y/zbRpDrvv\n3sEZZ8D228N669WnLXfeqTjggBTvf7/mv/5LodRgkJH58zWFgtGdQkHz0Y8W+PnPHQ47rPzzm/w0\n5vfAFoY1GEOhAGvXmoAHc+ea9kajJkiI68Jvf2simwX7IOuF1nDrrfD444qDDspx4YU2m23mcP/9\nMHOmyx/+kOOQQ2L8/OcOv/1t5ef3PDOzlUyav4N+mcSS97pw3HEWt91mft9iC81vfmOMb+BoAeyw\nQyfQyZIlHmedZXL32LbimGNq25aTTvowK1fGsW3z8e7AAwefk1TKfLR7+GHYcEOPbbaxue++QftY\nCaWb/EvfO2FEa/PMBOOfpUttfvxji6OOMvsJX301AmzCXntlaGszHz5rzdNPmz1aH/nIO8Tj7xu3\n7gUX+Fx5pcXllys+/enyZQQfX4N3f9gDZHge7LNP6cBTYbbQGy65BE4ubu74znfaBso/+UmLE05w\nBpb614oHHliPs85q48Mfho03Hj+I1ac+pVlvPbj2WvNBoxLyeYuOjqEBMsJm00p55x348pdNP22z\njeb5583YpasLursVDz64Pg8+OFg/GtXce2+E7bbTzJql+OpXoavLY9tt+4CRcd533tmkwnnyyeqi\nO65LiLNVI4Jta48+Cq+95rLzzg4dHZrNN1cDeTcWL44NOFu9xS0/sdhIZwtgxQr4+c/hv/97aDS8\nd96BDTaAO+5Q7LJLnS+qhCuvVLzwAsyZA1df7U9oYHbe2VzD9OmadDpDKpWip8fs2dpgA01vr+Kk\nk2wSCdhwQxOe9eabY9x8szn+4IMBbE4/PU42a/HjH4+Ucfvt8MMfwkYbjW9ct97avJivvBJOOQXe\necdi553Hrh+Qy40M/R6PhztjfKFgwt4uWpTgggtsjj8++J92YEfOPnto/SVLIuy5J0ybZgb5HR3m\n91oSLMG56CIfk6lhKMGzoxTsv3+e733PGRKZayJK6wYBMsL4Anz9ddhkE4fgY8EgnWy//Vz++c/B\n8m99q/zrrxTfh733hrvvhk031Vx+eWZg6cVHP2qepa98JUcqFWP2bHjlFfjHP8zMfbnkcqaf24rj\nrGCwHKZ+eflli2efNcp3ySVmWdi55+ZZb70Yp5xiM2MGlKZVefllmyOOCP6y+chHLDbcENZfH1av\nNna5Wnp6YOVK0+H77GMcrdH48IdN/+y6a4E//9nh29+uXFY+b5FIGIMZ5BoK68D+uOMsrrjCtHPv\nvS3++EdjnP7v/8z/z5jhM21ahhNOMHuIZ80y9n7aNOPA9Peb/nn5ZSimzCSfN3mx1i8mkclkTB6m\nXXYZtEUBr7wCp58Oc+Z4I/5vNKZNg9NO6+faa+
NoPfJ8YxG8U9raAidYh9YBBnjqqZF2/HOfy/HL\nX9qk0w4bbwwHHtjNPfc8y1FHfWSgzm23Wdx2W4qLL/bo6zMfEZJJmD5dcf31Sf77v2GnnQbPqTVs\nsYXNrrvCNtsYZ3p6cSvjihXm429/v81ZZ5kps9mzJ257JGI+1P7sZyY32vTpEx8TYAJkDP4dzPyE\nlf/9X/Pz5JP7Wbgwguc5RKOm3W++uYazz15Nd/dGnH9+lMceg+XLfb7/fZtDD/U54wzTx8Es1mhs\nv735QPDtb8PXv64oZkoSRqHavFjCGMycaabK//GPNTz5pMdf/gLPPguLFnncd5/Dww+bepmMMaxB\nwIjA2fr3v+H22yOceKLFOefAUUeZMKVPP23z5z8r/vY3U2+//eyBXF29vea4n/7UDOhc1xipRx81\ng6reYbEcKvnq77pwzTUxLr7YYs89zaAr+Fo9EeuvP/Tv9nYz+Hr7bY/rruthv/18rr3WvDi//GWf\nY4/NjjjHeee18eMfW+y+O3zjG/DUU2s466wnOPPMPmbOhBNOKK8tX/qSGRBttZXDRz7Sya9/bd6C\nvg8nn2xx7bUxbr5ZDQkRn80aY1N6vYmEHvgKGUZOOgk228zhggsSE9adO1cPOCdr18KOO8Lmmzu8\n9Zbiz39WvPOOGQQ88wwceGAHa9YYvS3NI5PPD34gCF46nqfwPPO368KyZWYAsueeE2/1/MhHXHI5\nxQMPlHe9gYzgg0TwPIVpUA+mjaU5SW64wRvyJbDU0QLYcEOHAw7o4N13a/8yf+01i7vvNrlnfvWr\n8Y1B8AX6llsqkzHc2QrboP7mmxW77NLJK68obrgBvv5149yedlqWk07SnHKK+Zjz4ovw3HNruO66\n+/jb39YOGZgddFCKTTd1aGszH3zOP18N6SvXhUsvVZx33vjPYm8vPP+8uV+/+EWGa66ZuP377Vfg\nvvsUy5dXfu0mGqH5PeiXMIavXrt2MIpcoQB//OPQ4coXvgAPPJBmk00GX3Cf/rSxYdddFyWVcthq\nK6O722xjbGImY1ZObLihcaSPPTbJ1lubwfxtt5nlgv398ItfxLjtNsUPfmBm/C+9dPRN9qPxwQ+6\nrFiheOON8q81eEcnk0OTTVebs6uevPEGnHZakmRSc8st8KMfQaHg8vOfmyBYG29s6qVSsOGGffz5\nz2l6euCOO+CEE3za2jSnnGLzP/9jnrtjjoHPfMbmN7+JccABNp/4BNx7L3z1qxbbbdfJ668rbroJ\nzjoLPvEJm+uui7LDDjabbAIf+xjsu+/gjMvnPlfeNSxcaPp5gw3MMjvPg0MPtQY+9o6FeXYGO8W2\nw+tsFQrwne/AIYf4nHFGP11d5mPE9Ommb2bN0hx11Mtcf32G7bc3Y83Pf17zhS9kOf748hQvGoV5\n82DJEvjsZ6tfYrcuIDNbdWKTTXxSKWM011sPNt9cc+65PosXW3z0o8a4Bl+xYPCl97nP2axYMRjp\n75Zb4JZbHGDkdMNWW3Vx++0ee+45OLg8+WQHmM7XvuZz113mi9373tfF5pvvyuWXa4491ix17Oy0\n2GqrOP/5n3DFFWNPp192meKb30wW21aLO2O+hOy3X4FDD/VxHPMC/fnPfZ5+OssDD8T42tcUJ5wA\nZ5/t8d3v2my5peaee8zys+5uza67ruRDH9qUU09to2NkUMRR2W476Ow0L1OACy80gUuOPhqWLbMA\nc4177AF//aup099vltvY9uBAxCwjrM19qDWFAkMGav/7vx7Ll9vMmWMGzccf/zxPPbUF3/hGhHnz\nYOZMj7/9LUNfXzvf/a7Nk0+a47bf3gz8N93UvFhd15iJnXfWvPWWyYl1+eXG8dp//xTvvGNz8snG\nsN9yi8NnP7vXQBv22QdWrVLstVceEyh0fLbd1ijy3nvDj38MX/va+PUDxzdwtsI2qAd47jmYP984\n+6ec4nPKKW
vYYIMUhx1mZuUuvzzDVVdl2XXXJNFonPvuMx81/va3CPvvr1m2DK67ziwjA3jpJYvZ\ns8efgXzxxbG/2L70knnm/vQneO97x/5qCQwMaF59tbJrHv6hIvgaHJav9VdfbZ7p3Xf3+fSnR35z\nvOiiwd+7uzXrr59j22193njDzFZ87GOav/996HELF9qce24Xxx+vmTbNDA4LBRtIsHChCdv+5pvw\nr38xsHf0+uspzpalaG8vcOCBeWbOnPhr1sEH5zjjjDY23dQMWN/7XsWWWzpjzogFaD00z1aYlxH+\n9a/mYb7wQqP7zzzz/9k78/CoqvOPf965k8kkk4SEfRFRRBa1UgQX1FYQrYpV61YX+Gm11roWcWvd\nlxatu9bWVmnrVor7gtW6oKJFRQURN0QFQdxQICFhkslkZs7vjzOTTFZmklmT9/M8eSY5Ofd+z73n\nnXPve5b3hPF4NlNe7mPdOjc//Sls2mQ46qg1rF7dn//7P4err7bHzphhowRWV8dmScD11xc1e879\n8pcunn666RUotl7RvhbZdBH7bNxhhwiLFydW7h/+0L59/+MfcPzxdtSyvt7FbrvZ7+W227Y+JuZs\nlZTYdjVWLw0NzSP4ZpvNm2HPPR2++UY46KAIRxxhv0cdORwTJoQpKYEDD4T99ovg8zUwb56X444T\nHn64ac+rPfZoYNGiAr7+muja7tYXvnKlNNZtPDfd9DZHHz2CoUNbzhpom/79bbv26qt2mt0JJ9g1\n5o89BtdeC8OHC1DA0KHWmYhhvztNf7vdJtpRkXte8emn2/qaOTPx3vVhw+CWW2qbBbPYEnffbZ9v\na9bY71sSh/Yo8s7ZEpEKbOj3n2JDvz8KzDDGJNT1JCL/BQ4AfmaMmZe2grbAcWDSpAbeeMO+dfj9\nTb1Y0NS4rlvX9GL/wgtNPcu7797A99+7WbXK9rYtWxbh4YddTJrUvAqPPTbCAw+4+POfmzdUq1aV\nMmWK/X3lSrANWTGzZtm0Y46xUwSNsS+wMSdm0SKJjuYIBx6YijvRPltvHWHZsjBut5sf/QhGjjQc\neGAVI0aUINL8Ol0uEna0Yvn/9z8wJsQbbwT49a9L2Guv1vlee61pSkcg4EQd4qY6KSrK3WmEb79t\n627OnDD9+29m0iRf4/Stl1+upr5+LbNmDaCiwj6QQiH44Q/DlJUZJk2yL4JPPRXm0ktd1NcLq1c3\nP//q1fY+xNYVrV/fVCcXX2w/jzyyeaU8/zyMG2eYMCGxoSaXyz6Q5893MXMmPPigw6WXujngAPv/\nSKT5i2F9fSyqp/3b44nts5WQXEa45BIXGzfal7mLL440m1rk9cL06UFGj36HCRMmUFHhxe+HL74I\nccghwtKldjrbEUfYTpEf/xh2370cgKuuslGlpkdjs65ebdchvPMO/Oc/bmbOLOLss2HQIPvStnat\ni5/8xDYoffva0ZhERrmHD7frVpIhELDtT2xkK5de6jdvhpdeEmbN8nPhhYWNHT6JEHPqzz47wgkn\nOCxbFuLvf3dz6KG2rfb7hRtvbD1KtO22th5t2+vmwANL+Owzh8+im5E4juEXv/gMj2dgQuUoLYXD\nDjPMnSvRdagOhYWl1NQ0/55VVQmPPy6cdJL9btXXgzGS050TMV55xc0OOxjOO8/ez623NlRXhxs7\nMWNsv30NS5ZU43ZX8O230L9/hD/8wcX114e58ELr9J9ySoR+/QK8+66XtWtdDB0KTz/dVO9nn207\n4tats21WDGPg1FOTGykfNMhwwAERrrrKxVVXAbgR6cWNN0Y47zw79apPHxdXXuk0PoNajmwVFNjP\nYDB904k7w9tv23Dg117r5/jjC+nM5Kjf/a6Oa64pwO12c8UV9iX9iy9ClJf7uf32MmbOdPGXv8BJ\nJ4UIBjczenQJ//mPm0gEvv8+zBdfBDn22AKKi90MGADvvFODx1NFSWsfrEPm
zbOdSNdfH+G++7z0\n62f4/nvhoovArrm3z7JHHoEjj7THNDTYfbZi5Oqarbfess7+wQfbJR0ddah1lW22scGF3norTBeC\n9XV7cqjPJGH+DYwBpgAHAz8G7kzkQBGZCYTJUjfEjjuGWb7cPtjsNMKm/8Ub6cEHB1mwIMR++9np\ngQ88EOaZZ2r44x/tm9Fee8H990eYPdu20L/5DVxwge0BvP/+CO+/X4njGIYMia3f8XPGGR8Dtrdj\nzBi47row8fsQ7bYb7LGHw29+42OrrRxuucX2Jj74oItp0+oJBGx5MsVOO9mXg+HDI0k3oh2dc8wY\nOPLIIPfc09RCxn4/7bQIwWBTT2iTs9VErk4jDAZpnC992GGGXXZp/gQYOzbcYQ9pSYmd737OOYbV\nqytZsiTEnnvCM8/ArFlh/vMf21qPG2fzr18P++8fYdYsP3ffHWb06KYFsjNmfMTKlVVUVVmbfOyx\nxNY7xHj66Ujj6OKiRcLhh5eyfLldY3TuufaFNza9Jrq9RuPLY66NoEQi8NprwhVXwJNPJrYezueD\n7beH6dPrGTfONAZ1OflkGDGi6S3ziivs1I+pU13st18Z557r4uqraQz4cMstRQwf7qaoyPY2jh1r\nxUUM8+cn3mO+557WgduQxNb1LUe20uFsbdwofPFFcav0QADef9/+vmyZHVlYvdrFd9/ZtveWWyAY\nFCZP7ryHcdxxhq+/3sgOO9jp2/vtB48+Gubdd6s45JAIN99sbdTvD3H66QGqqoSVK+2sAoBnn/Xw\n2Wf2S/GPf8A331Rx6KFJzDsD/vznCLfe2vR3fb0NvDJ/PixeDEce6eKUU0o45RSHu++O3RurGQuQ\nkavTCD/+GO6/38vkyYk/qktL4c474YorInzzzcZoW2Zfqv/61wjnnx9g3rwIS5fS6OQ89VQ1u+5q\nuOwyOyvguefg229DfPnlRk44IcKf/tS5nvrHH49w7rlNfxcUwHnn2e/E++/DggUujjmmtLEdiwXM\nij1vctUJXrEi1jFQz+DBqTnnsGF29GjIkAh33BFhzBi7nmrsWBg1KkJpqV2rdeKJ9vl05ZV17Lab\nXa4xaBDsuWfn5vH16mXPcc89Ee64YzPvvx/m4YfttEiA7bcPs88+Ea6+uul5U1/vajaNMOZsXXih\niyuvtE5zoiOgyVBVBfPmFfDrX7s6nJ66cCFcfXUR999vG/fZs1NflrYYNAgOPtjkVMdArpFXI1si\nMho7KjXeGLM0mnY28LSInG+M+baDY8cC52A3OG43Xzxb2o062R2qd9opTDAofPBBiOpqFz5f0w7m\nbneIWHXsu28De+xhQ9v27Qs/+1kYvx8OPTTE++8bRo+2xxx+eJDttgsybpyr0SEJh8MMHmz49NMg\npaUO5eVw9NF1jBixlmnTBrDrrqWI2HwHHVRHJFLMp586XHmliyVLhCVL7Ntq/INi0qQGQOLW5HRt\nR/JUpXV2R3IROPbYENOmGZYvh5Ejw4waVcPOOxfxt78V8t//QiAQJhBw8PlMs2vxeg0NDUJdXYiC\nAuvMGhNm9WqH774zHHGEzXvXXYYf/ciFz7fl8vzvf/3xemHixM7b2lVX2QfzxIkGtztMff2W71t7\nOh6P7Rh45RWbtu++Yfz+EJ9+Wk+/fg4PPmh7xn/+8xB+fz0+n5vp0w3GwNKl1Wza9BVlZf3x+ULM\nmBHbryLxujUmzO67Q+z70NAg7Lxz86Zq6VL7WVtrv0MFBSHC4XBjj7DfHyIUyr6tLV3qsHGjsM8+\niZUnvm5+85sAF13k0NDgMHOmi6IiG2zkuuv83HZbMV9/bV+QX3jBBbga7wnABReEmD3bRV2dNI7+\nAfTtG+Dtt/1stVWvhO/PlCkQibjZfnuH448v5rbbbPrDDwvV1Yajj259fGzNlsdjr9txIoBDIBBJ\num2orLRTVfbay2HFCsOgQWHefReee66Y
V17Zi223reTDD8OEQraXfMWKEp591s1tt4WZMcNhzBgX\ny5fb0cCZMyPccot9ERkxIpJ0ex5f9sLC5nkPPjiM3x/h4YcbsFtlgOOEmT49wJw5hdx3X4SDDzbc\ncANcfLGbn/40zA03GLbbDjZtSt7WfL4wZ55pvx+77BLm9NMdTjghft2Ei1h/6iOPRDjxxAh+fyRa\nL1bHOtxugsHEn2PJpiVb3198Abvvbq9j6tQQoZAroePjdTweiETCDBli12fF53Mc+3w7/PAwAweG\n+N//go31BVBebturu+5qqsdkr8flCnPddXDppbBxY5jzz4cnnmiKrDB5coSXX3axenUD225r16c5\njgu32x7vdtt6qq0NUVwcJhgEvz/c2HmRrXbtvfdcjBhhO09SZQPpuJ5knqHGhDnmmCA+X0FjBMlj\njgkTDteyapWP/ff3MHt2mCOOCBMMOhQWhhp1XC6orHQaA3jdcouhutrNmDFl1NQ4LFoUYsAAq1lZ\nKY3vA8bAbbcJkQjcdFN54z6H48YZrrjCxUMPbUd5ueHNN8OMGAEHHeQmNtLmOBE2bACvV/jtb128\n+KLhoYcMY8YY7r67aQrsBRdE6NcvktA9z+X3tc7qpNPWOkNeOVvARKAy5mhFmY8dqdodeLKtg0Sk\nCDsidqYx5jtJsJs9GAx2egfvttJ33NHqXnZZBI8n3NhDEggEcJwaYlHKBgyItKszeHDTkLAIjB27\nmUik9TBxr172+Orqpt21t97aT01NU6+M3ctqM0OHwr77wogR5VRWujj77DqWLHFz//2beeMNNwcc\n0IDf37qLrTM7knc1LdU7kg8ZYnsVR46EQMDP00/Xc/DBZbz/fpi6Ojdeb9Nu8QDFxbaL/sMPNzNz\npo+FCwuwXyP7IF25spJHH/Vw4YU+hg4t5Z57NlNXJ42OFMCSJQHWrRP+/ncvY8YUcNNNY7n55gjX\nXVfL22+7uf76Wtzu5GztxRfh6KPr+etf/Y29pKnelb53bz/hcNPIX1s6gwfXsmlTamzgySfdDB8e\n5qqrinnkkeb7Ibz4oovJk2HTJmuX4bAfvz9MYaF9aFVW1jJnjg09vtNOwsaNAUaPDrep01bawoVu\nxoxp/57dfruXDz6A88/fzPbbR1odH+OZZ4qoqIiw447Vzb6jydbNpZfav887z36edFI9xsAPflCO\nMbDbbiGeftrDp59W8s47bqZMaeC3v4XHHvNw6qklXHVVLdtt56eg4CPc7u2orpY2ddpKKy+H8vJy\nqqpc3HmnlyeeiPCDH4SYP98DOOy9t/Dpp/X8+MehxuuIjWxFIjVUVxvCYQfwsGJFgLfecvHtt4LH\nY3urn322nr32si/+dXVwwQU+Dj00yE9+Yuv2mmuKuPXWIlqvW7UaU6Y0rdOwI8/2sTZjhv3/8uVN\nQ3i33OLCcQxnnRXAcRKzya5+d0aPhlWrKhGx7fHQoQVAKRdcsJn+/cPU1HStXTv5ZPv37bc7XHdd\nES+9ZNuo0tIINTUuLrmklhtuKOLLL2uoqrLzax0nSHV1OLq9RS9CIeHZZ+spKzOMHWu/J4895mHl\nSi+7717P3LmF/O53dQwbFknougOBAKtWlTBwYD0uV3L3bPZsL47j5eOPq+jXr/WawlS1awMHtp8v\nlToVFXDzzdYhDgTgmmuK+dWv/Lz8cinPP9/Accf52bDBi8/npb7e6kQitp6++moz773n4q67fDzx\nRCGzZ2/GGDjkkCAej9VZtsyhpkbYe2/7/fv661o++8zF+PFN7d2bbwYYMCDCJ584jB7deVtbuLCs\n8bxdeVYn+s6UDp1EtGNLFHbZZTM77VTG66+HmDo1QDDYC8dpoLra1k8kUkj8a3SsXV2+3KZttZWb\nAQMi/OIXQa67roJly6p4912HDRtcXHBB67WZS5cKP/tZL6AX//53m8Vk9uym9mzBglK+/tq2c4sW\nNW/T
TzxxE9XVTe973eF9rTM6qbS1YBemZuSbszUQ+C4+wRgTFpGN0f+1xy3AQmPMf5IR83g8Se/g\n3VFe8LP77hFee62AiRMNpaX2ZcLr9dK3b9NalwEDIlnZkfy550IsXNjAGWe4o0EhShkyJIzf35CQ\ndjp2Ps/0juQ77WTT1q/3EgjUUVpKs2vx+eyX7eaby1i4sPU8rO22a3rxW7vWYcoU+4I4dWqESy4J\n8dJLIS67rGnqU2zDZ7/fxVln2eHJzz/3IBLm2WcjuN1bru/KSj/Llzscc4zQq1dZl2wgU7aWSNrU\nqTbtr3/1c+ONsGKFm5Ur4cknhTfeKGLyZDDGOmF9+vjw+cJ4PHaO59y5JTz0UPP62WUXw9SphpNP\nbmD9+jp23rmIQMDB5bI9/S+8UM8vflHKhg32oTV0qI+77lqO1+ulqqqMW25xse++hpkzXaxZY/PM\nn+/hjTfCbLtt6+uZO9dw660FnHJKmN69278Xid6z+PSyMpv+4YeRaG+en0jEx6BBpWyzDYTDHvx+\nP8cfX8DXX4c59VQPbnctS5cGO1U3Tz0V4bXXItx7r32RWLeuad7zTjtZm//ggxCjRtnriY1sDRhQ\nSlkZ9Oplr/Hyy0v46KOmenn6aQ8LFhQwfryhshIOPTTM3Llu5s4tZNttDWvWkNA6gL59Dfvua7js\nshATJhRw7LERnn/ehc8Hn30mXH55LVdfXRwtZ5htt3VF181mpl0rKWlKO/roMGPHVjFyZFFK27UJ\nE/w884xh48YQvXtDKBRm1apaSkqKuf56+M9/yhrD95eVFVBWVkK5HfBj1SoXZ55Zgojh5pvt6N8X\nXzR/eXv9dQ9FRYZp0wIcdpib3r3tPRw4MMxf/hJubNd+9rMIZ50lnHbaRPr0ieDxCLvtZli5Evbf\nv55rr3V1eC0ffuhi992hXz/TqedBLrZr4OfCCx0cx+GXvwwxZIiHn/wkyO9/7+OXv/QSCrkoLRW8\n0XlYJSXW6C+9tCw6cm351a/sM+KAA8IsXOji3HPD/P739jWuf39DKGTYemt4910XL74YYujQMJs2\n1bHPPr1wHEM4LFxwgZtddy0gEvHyu9+V89FHwuLFdlbEI49UNbPV+Ot59lk377/vMHOmfYFPVd10\ntc66UjeJ5N1+exdff+3B6/USDLrw+dyUldl6il9m8OmnIdxuWLLE8OqrIZ56ysvnnwvr1rm47jr7\n3Rg7trwx/w9/aADDQQcFGDq0gDPOKGDECMP33xscp4GNGwvZb78IK1YIt9wS4qijCrj33gZOPLEA\nx7EjWR980DoC4OzZmxkwwMOoUaUdXnd3eF/LtK15urAoLSecLRG5FvhtB1kMdp1Wu6egnXVYInIo\nsC/ww2TLdfnll9MnujmHMXY62bRp05geW5EeJZkdqi+/3HDwwS4++ECiU6VsPq+3Kd+AAdnZkXzc\nuBDbbVeP4xR2SjvVO58ney2p0Bk82I0IfPedm7o6h4oKmuWrqLBf6kWLXBxwgI3o9sILYbbbrrqZ\nozVrVphZs1yNGyA/84wr6lg1fVnHjbPT4Q4/fA2PPz6sMf3VV+30n0mTDEccIVxwQcflfuMNN7W1\nwv77O80WjqdzV/r20tOh43LBkCEOw4bZcweDcN55Baxe7WPgQJtWWurGcZoCZMybZ6fWxm978M47\nwjvvCH/4QyFQ2LiAfupUcBwXTz3VfPRs7VqHe+4Zwe23l9Gvn5t777VrCeKpqhLGjHHzl78IyUIz\njQAAIABJREFUGzZ4WbWqgL//3UUoZNeelZZGuOkm02mb3tI979vXBjqxUaBa5y0sdLjiCnv+ysrO\n183ee8Mee4Q47rhqVq4s41//cvPPf8LOOxvee8/a+PPPu9lxR3tcbGSrrMyN2w1er+11j3e0ABYs\nsItTliyx57j1Vqv3i19EWLjQRSRiA3k8+miI9evtg6+qys2YMfDFF9
WsW/cOxcW7cNBBZdipzi6+\n+qqSXr3KiEURDYdDVFcHOPpoDwUFbkaNcjdOGctWuzZkSCRt7dqgQbFzwpAhhrIyh9GjheXLHUaP\njq2lc+F2uxvXDf/tb/bl0Rhh5symF5Mjj6xnyhQ3VVUOF18sgHDllcVceWW8aqwX31BZKTzxhIsn\nnrBt4YYNtr6ffNLWxQcfFHHUUSEmTnQ3W8fpOA4ul5sTT7RbgVxwQSSh687Hdm34cPudPfXUWo46\nysPatW7q6uz6xphObIT+f/9zUVBgOOywILvt5m4M+PHcc/Yz5mgBfPedrZ+NG+3fU6bYKWUxxy0c\ntjf88ccLueGGSa2ud8ECYezYXkyYIDz6qIu491Mcx+GmmxwKCuDQQyWh6+5Ku5ZMWirqpqO8225r\nNz53uRyCQYfiYmnMF5v22b+/aVxPu9VWISZPruO88wr49ls3t94Kc+fCtGn1fP11ARMnurjmGjj/\nfOGYY8JUVwcoK/NE98QUNm6s4pVX3sXrHR9t1+yayg0bNlJSUsbzz8M55whjx4bZtKmKb78tw+Nx\nc/rpNpLyEUcEKSvz9oj3tXTa2gMPPMCcOXNwu93EZsNtSGbhcgtywtkCbgTu3kKeVdi1Vs12bxK7\nS2oFsK6d4yYDw4FNLaYPPiYirxpj9m1PcNasWUyKRh0IhUJUV1cnFRKzLYYOtS+Da9faMONt0a9f\nEhthKSnF7bZ7b3zzjSu6Zqv5/3v3tvX3+ec20g/A5MmG6mrDPffYdV6HHw4VFYbddqvmhRdK2Xln\nF9On230ttt8+xBNPCPff7+bnP4d77qll550/48gj+7DttiXMmwf33WcoLo7w5psOb75pN7Z2ux0W\nLnQ12/Axxvvvu/H5DGPH5tYi93Tx61/be7J0ae/G71BTNEL7GQjYdRmXXx5i9erNuFwlvPSSm3PO\naTpP7MXlmWcgtrZlxowIfr+LggJ4+OEIDz7YOk7z3Llh9tijmoEDS9ltNzdffQVnnukA9s31nnti\nOV3MnFlHcfGWQ97nCyUl8KMf2Z9bb4VXXolw1lnQt6+LhQuFmTNtvvp6Fx6Pwe22NhnfIXjWWXaD\n5P32C3H//UH23tvDr3/t5qCD7NS33XbbzMEHF+N2u1izxoZpLiigVSS60tIwdXUNrSJdxm+SHc/o\n0c2j2PUkhg+3G/S2DJARq5f333dz1FERgkEX8+bBPvvAd98ZbrzRz1ZblbFihY04utNOhg8+sOfY\neWd4770mjRdfDDNihJsddrCBFIYPryEYLOaNNxyGDrXHfvop7LWXmyOOaNq7bcUKFy++KOy8s93i\nAOCAA3IvlHaqGTnSPudXrLCdQvGBoGJrTwMBuOqqCGed5aegoIzKShvM6uKLDfvvH+C++7zsvLPQ\nvz9stVWEAw/czEsvlfDyyy7eftuea/NmG6Z9111dLF4MzzzTfDTkv/+1zt8tt0R46SWHr76Cxx+3\nUeZimwR//LGN5jt7tg0skc7odrnGNtvYtaAttxmBpvakrQ3NBw+20WJvuAF23TXMCSf46dWrDLfb\nxYwZdkugtpYAiUBJSajNCL4uV9N3JBSyf8fatf/+FzZvztFNv/KQY489lqlTp1JWVtbogC1YsIDJ\n7b24b4GcePQYYzYAW3QZReQNoFxExsWt25qCHdl6s53DrgVaxmT5AJgBJDWtMBXEfynLytp+oGj4\nzOwyciQsX+60GY2wT5+mv0eMaH7ctGmmsfENhWCXXcJMmmT3Eps2zU7rqa6uoaysjBkzbL7TTqtn\n8eIIU6c2UFFhI7/94Q9hvviihkWLypg2zYmeTzjllBJGjLAbL8Z6HdesgSuuKMbjMUlF/MtnvF4Y\nPTrM6tUlcS+P9n+xkS2wkSftJpt2z7sf/ADOPBPefDPEffeFuOsuL2efDbffDtdfH2bZshDnnecw\ndKh1vMaPr+Uf/6hl/Pgy6uq8XH
ghzJoFP/2pIRi0df3ee/aB+de/hlmwIMSjj9rRsTPOgM8/j3DK\nKQES2V8s33C77bqGAw80vPNONVddVc6zzzYZYCDgRF/oWztbe+xB9PsAZ51le3U//dT+LxSKUF3d\n9MIwbFgsPd1X1L3Zfnvbux57YYytF45/wR861HYghUI2QmLfvuHGl+rRo+GUU+Dcc8PcdVcDBx/s\nYb/9HBYtgoEDQ3z77WaGDy/B5YKpU60DMXPmR0ybNpKKigrefReGDQvzxz8Gue66Yh57zNrBJZcI\nF11UwocfNjkAl15q9zGrqcnU3ckOgwZF8PkMK1ZIK2cr/rkzaJD9tAFy7O8//WmY6uo6brqpoPFF\nMPbdmTIlwnvvufj73+Hcc0OcfnqEc8912G8/G/r/wQc34zjv8PjjezBtmqdxS5fCQoPjBPn88wKO\nPtp+b0eOdNi4sZz1622beMgh6b0nucg229j7tmqVtdGSkubRCMHORorfIiaeIUPg7LObrz3s37/N\nrF3C47GRM3uSI5xP5ISzlSjGmI9F5Dlgtoicjp2TdTswNxaJUEQGAy8C/2eMWWyM+Y4W67yiI1xr\njTFrMnoB2MWyBQU2clyfPs1HsJ5/HsrL9a0i20ycCPfd58bjcZrthQbNHeSWzlYqKS83/PznhkMO\ngTlzbCjh559388EHtqds0ybb+3zOOfYBcP757Tf23ZExY8IsW1bSGPq95cgWNG0cG4/bDbvvDsOH\n17LjjgWcfrrDddfZnuTqan+zkesjjmhg662XNe5/BXDvvfZlNH6drOPAaacZjj/ez557umlocLjw\nQgiHI80WKHdntt3WhtmO7dtlOyqa/h9fL+1ttqykj/33t9swvPeefeTHOieaO1uGKVNo3I8x3sF1\nHDuqEQrBZZfVUVZmOxD22MOmlZU1Pcuuugq22srPyJFNb31jx9p8F14Y4MQTPeywg5s334RDD20+\nynLzzTBzZs9wrl0u6wTHRrbiZ1HEP2dsIKvk2GUXuOMOex8ffHBzY7tWWAiHHNLA4sX1zJ7tp6Ki\n6Ys5ebJh/PjN/OtfvRqDy3zyiZ2aCHDccbazuCfUTTyxDp+PPmrtbMXaNW3TlC2RV85WlOOxmxrP\nx25q/Ah2lCpGATCS2JyetsnaG5CIbbC+/NJONYtn//2b1l8o2WOnnew0wpKSQny+5k+W+NGj7bdP\nf1lKS+G00+x6iFdfNfTtCxddZAvxxz9a52HRoirGjy8hP7fN6xyjR4d58skS6upsVCiv14bTjV9n\nEOsRbovCQjjjDDs6VVCQuheI3/zG9MipasOHWwd07Vrbu1pf33xUWJ2t7LLTTvZz+XL7BYmNbMXv\ntbZt6xmznaK0FE46KdjufkPbbWdHNufMaUp77bUQc+e6OfXU1JQhXxg92vD++0JJie2IjRHvbA0a\nlNnXldNOM0yebPd2euqpCAceWMPs2aX8/vc95/kST0fO1ogR9nd9Z1O2RN69FhhjqoDpHfx/DbGY\nwO3n6fD/6aZPH+tsxU9JU3KH0aPt5+bNBfh87e8qmc6RrZYceKDh888rgTIGDXIzYgR89pldNxEf\nerynMGaMDc3/2Wd2vzPHae0wRWPbKBlg110NLhfMnw9HHNF6Q3B1trLLgAG2Y+azz+wLc1ubj44c\nmZnnUWzdyc9/DocdBhdfXMtuu3nYc8+MyOcUu+9ueOwx2HFHO40zRvyIY6o2D04Ul8tOuQaYODFC\ndXWY+++30+F7Ir16WUe4LWcr1okRi/KpKO2Rd85Wd2DkSFi2rPU0QiU3iJ9+VtzG+Oj8+WG++MJp\n1iucCWJzsj/+2PYO33or7LVX5zfZy2fGjLHX/e67TrMFy/GUlzdNa1PSS79+9oVx8WLrbNXXu5rV\nS7yz1a9f5svX07GRCeGTTxwKC8Nttl2pGtlKlEMOgfffDzF4cID4KK09iR/+0BAM2veBvfduSo+v
\nn9696fbr13KdkSNh0SL7uhzvbBUVwZIlVYwZ07NmlijJo85WB/j9fiorKwEbcz8QCBCJRJrF4W+Z\nlkjeyy5zM3FiAaNGVbFyJdTEtaSJ6iSjHTt/qnTa086ETlsa6dDp27eM9esdHKeOysr6ZvnGjo2w\nyy4OUdNI+fVs6dj+/R1qauCXv7TptbXp0cllWysrq6GoqIRlyxwKCyNUVm5qzAt26GTTpsq8sLVE\n71mmdDqrve22ZXzwgVBTU0Mg4KGwsIHKSqvV0BAG+gIQiVRSWZk/ttZd2rXBg0tYs6aA3r1DLXTs\n/DW/v6oxZH86rqetfAMHtm6/0qGTq7ZmR997E4mA213XQsfWy6ZNVXlna8ncs0zpdEX7xz/28uab\nsd6jGiorTWO+/v0D+P2hxu9Ortpad23XkrmWruq0t0FyIqiz1QFr1qzp0iZmHbHTTrBypf19xYoV\nadFoSXfSSbeGzzeR9etLqKlZy+LF36dVC7pX3WRKZ/DgclauLGPAgDoWxy0QmTLFxcaNhSxe/E5K\ndLrTPUunzsCBw3jqqe154YUvqa0diddbzeLFcbHB2R+AJUvaWczTSfL9vmVKw+vdCRiEz9fQTOeq\nq/ri9YZZurQy5ZrdqW7SoROJQEHBFBoaXAQCa1ixYm2jzgUXVLPVVn6WLk3PgqB8vWfZ0OnVqy8w\nDoAvv/yYr79Om1Qj3eG+ZVIjEzpr1nQ+pp46Wx0wbNgwJkyYADR5uV6vt5XnG5+WTN6amhpWrFjB\nqFGjKC0tTerYZLRTrdOediZ02tJIh05hoXWy9957ABMmDEtJfae6bnq6rQ0eXMfKlWWUlXmYMGFC\nY94HHvDgcrkQmZAXtpboPct1W/vBDxzmzoWPPtoBvx+GD/c2az+HDQvx/fdOytpUbdeSS9t55yJe\nfhl8vlAznXHjYvm2yRtb607tWq9esH497LbbVowaVdyoM2FC/tpaMvcsH2xtyBDh0ktt/jFj8tfW\numO7lklbC8aHIU6SvHK2RKQCG4nwp9hIhI8CM4wxHY7tichE4A/A7kAYWAocYIyp7+g4n89HRTRE\nUPymxk37WrROSzYvQGlpadI6yWinWqcjjUzpxGukQ2f8+Ho++QR22snb4bWk43rU1hK7lsGDqwDo\n18+hoqIib20t0XuWKZ2uaE+dCi++WEJtbYDevd1UVJQ05nvttWp8vjLKy1Oj3ZXr6YntWix6alFR\nuFvYWjrqJlM68WmRiF3rM3JkCaWlDWnT0Xat89qx2+TzNeS1rXXHdi2Za+mqji9+f4YkybcVff8G\nxmA3Mj4Y+DFwZ0cHRB2t/wLPAhOiP3/GOmuK0iY33FDLn//8ZptRu5TcYPDgWkCjDuYSe+1lo3Zt\n3uym5abtRUXNo6wpmWXrre1nMJhvj/3uzYkn2s9YZDslN3nnnU3cddcb2S6GkqfkTasrIqOBA4Bf\nRjcrfh04GzhWRAZ2cOjNwK3GmBuMMR8bYz41xjxijGk/prfS4ykuptmmnErusc02mwEyMn9eSYyB\nA6GhQdiwwUtpqW5tkUtstZX99Hi0nzGXuP568PttpFkld9lmmwj9+nU4GUpR2iVvnC1gIlBpjFka\nlzYfu0Hx7m0dICL9ov9bLyKvici3IrJARPZKf3EVRUknMWf4sMOyXBClkf79m35vObKlZJcdd4ST\nTw5w4YUfZrsoShxud9tbjCiK0n3IpzVbA4Hv4hOMMWER2Rj9X1sMj35eAZwHLANOBF4UkR2NMSvT\nVVhFUdKL48C6dZX061ex5cxKRoh3tnr3Vmcrl/B44MYb61i8WHvnFUVRMknWR7ZE5FoRiXTwExaR\nkR2dAju61Rax6/ubMeY+Y8wyY8y5wArg5FReh6IomaegAESyXQolxoABTb/rpu2KoiiKkhsjWzcC\nd28hzyrgW6B/fKKIONid/9a1c9w30c/lLdKXA1tvqWCXXHIJ
faKr740xhEIhpk2bxvTp07d0qKIo\nSo+jvLzp9379dGRLURRFyT8eeOAB5syZg9vtRqI9uhs2bOj0+bLubBljNgBbvAIReQMoF5Fxceu2\npmBHtt5s59yrReRrYFSLf40EntmS5tVXX80+++wD2Jj7fr8fn89HKBRqTIv/jNFW+pbSOjpnommZ\n0klEO106bWlkSieV9Z2sjtqa2lqmdLqqHXusVFSE1NbU1rRdU1tTW1NbyztbO/roo5k6dSo+n69x\nn61XXnmF/fbbj86QdWcrUYwxH4vIc8BsETkd8AC3A3ONMd8CiMhg4EXg/4wxi6OH3gBcKSLvAe8C\nv8A6X0duSTMYDFJd3Twind/fekuvttISyRsIBBo/O6uTiHa6dFqmZUKnI41M6XS2vlOho7aWOR21\ntc5q9wbA46mjulo6yNc1bbU1tTVt19TW1NbU1jJlaz1mU2PgeOweWfOx+2Q9AsyI+38BdtSqMbaP\nMeY2ESnEhoDvjQ2SsZ8x5vMtiXk8Hsqi8VjD4aaRrfgdplumJZM3ErFrGrxeb9I6yWinWqc97Uzo\ntKWRKZ2u1neq60ZtTW0tF23tqKMCVFVtpKhIbU1tTds1tTW1NbW17mFrHo+HzpJXzpYxpgpod8GU\nMWYN4LSRfj1wfbJ6juO02qE60bRE8sYqsCs6iWinS6dlWiZ0OtLIlE4qtVNxzzKlo7bW/nWnSycf\nbe2uu2pYvPhDHGeC2lqCaWpr2q5lSkdtTW0tUzrd1dY6Q9ajESqKoiiKoiiKonRH1NlSFEVRFEVR\nFEVJA+psKYqiKIqiKIqipAF1thRFURRFURRFUdKAOluK0g4vv/xytougZBm1AQXUDhS1AUVtQOk8\nrUN05DAiUoEN/f5TbOj3R4EZxpi2A+nbYwYANwL7AaXACmCWMeaxLeltaYO0ttKSyRufppvk5d4m\neS+//DLnnnuubsjYg22tpQ2kSyeRa8mUjtpaa+1E2oJ8adda6qitJaadbhtIx/WoraXW1lJpA+m4\nHrW1zNhaZ8grZwv4NzAAmILd1Pge4E46CAcP3A+UYR20DcA04CERGW+MWdaRmG5qnFyabpKnGzKq\nramtqa2pramtqa2pramtdTdb6xGbGovIaOAAYLwxZmk07WzgaRE53xjzbTuHTgROM8Ysif49S0Rm\nAuOxGxy3i25qnJy2bpKnGzKqramtqa2pramtqa2pramtdTdb6ymbGk8EKmOOVpT5gAF2B55s57jX\ngGNE5BmgCjgGKAQWbEmwM5ueJZM3VoFd0UlEO106LdMyodORRqZ0UqmdinuWKR21tfavO106amvt\na6utqa2pramtqa2prWXa1jpD6xLkLgOB7+ITjDFhEdkY/V97HAM8iJ1CGAL8wOHGmFUdHOMF+OST\nTxq95FAo1Ojlxm58W2nJ5K2urubTTz+loKAgaZ1ktFOt0552JnTa0kiXjt/vZ9myZR1eSzquR20t\nd2ytpQ2kSyeRa1Fby56tJdIW5Eu7prbWOVtLtw2k43rU1lJra6m0gXRcj9paem3tk08+iZ3GS5KI\nMSbZY1KKiFwL/LaDLAYYAxwJnGCMGdPi+O+AS40xd7Vz/tuBCcBFWIfrZ8C5wN7GmA/bOeZ4YE6S\nl6IoiqIoiqIoSvdlmjHm38kckAvOVh+gzxayrQL+D7jRGNOYV0QcIAAcZYxpNY1QRIYDnwE7GGM+\njkt/AfjUGHNGB2U6AFgdPb+iKIqiKIqiKD0TL7AN8JwxZkMyB2Z9GmG0wFsstIi8AZSLyLi4dVtT\nAAHebOewYuzIWEuPMkwHe4xFy5SU16ooiqIoiqIoSrfl9c4clDebGkdHpp4DZovIriKyF3A7MDcW\niVBEBovIchGZED3sY2AlcGf0mOEich52z63Hs3AZiqIoiqIoiqL0EPLG2YpyPNaBmg/8B3gV+HXc\n/wuAkdgRLYwxIeAg4Htg
HjbU+3Ts2q/nMldsRVEURVEURVF6Gllfs6UoiqIoiqIoitIdybeRLUVR\nFEVRFEVRlLxAna02EJEzReRzEakTkUUismu2y6RkBhH5kYjME5GvRCQiIodmu0xKZhGRi0TkLRGp\nFpF1IvK4iIzMdrmUzCEip4nIMhHZFP15XUQOzHa5lOwRbRciInJztsuiZA4RuSJa7/E/H2W7XEpm\nicaEuF9E1otIbfT5sEuix6uz1QIROQa4CbgCGIdd5/WciPTNasGUTOED3gXOpHUUS6Vn8CNs8J3d\nscF0CoDnRaQoq6VSMsla7P6P46M/LwFPisiYDo9SuiXRDtdfYd8HlJ7HB8AAYGD0Z+/sFkfJJCJS\nDrwG1GO3hRoDnAdUJnwOXbPVHBFZBLxpjJkR/VuwD94/GWOuz2rhlIwiIhHgZ8aYedkui5I9oh0t\n3wE/NsYszHZ5lOwgIhuA840xd2e7LErmEJESYAlwOnAZsNQYc252S6VkChG5AjjMGJPwKIbSvRCR\nPwITjTH7dPYcOrIVh4gUYHsxX4ylGeuNzgcmZqtciqJklXLsKOfGbBdEyTwi4hKRY7FRbt/IdnmU\njPMX4CljzEvZLoiSNbaPLi1YKSL/EpGh2S6QklEOARaLyEPRpQXviMgpyZxAna3m9AUcYF2L9HXY\noWNFUXoQ0ZHtW4GFxhidp9+DEJGdRKQGO3XkDuDw6H6PSg8h6mT/ELgo22VRssYi4BfY6WOnAdsC\nr4qIL5uFUjLKcOzI9grgJ8DfgD+JyPRET+BOU8G6G4Ku31GUnsgdwA7AXtkuiJJxPgbGYkc2jwTu\nE5Efq8PVMxCRrbAdLfsbYxqyXR4lO7TYk/UDEXkLWAP8HNApxT0DF/CWMeay6N/LRGRHrAP2r0RP\noDSxHghjF0LG05/Wo12KonRjROTPwFRgkjHmm2yXR8ksxpiQMWaVMeYdY8wl2OAIM7JdLiVjjAf6\nAUtEpEFEGoB9gBkiEoyOeis9DGPMJuATYES2y6JkjG+A5S3SlgNbJ3oCdbbiiPZeLQGmxNKiDeoU\n4PVslUtRlMwSdbQOAyYbY77IdnmUnMAFFGa7EErGmA/8ADuNcGz0ZzG2J3us0ehiPZJowJTtsC/g\nSs/gNWBUi7RR2BHOhNBphK25GbhXRJYAbwEzsQuj78lmoZTMEJ2HPQI7dRRguIiMBTYaY9Zmr2RK\nphCRO4DjgEMBv4jERro3GWMC2SuZkilEZBbwX2wk2lJgGnZU4yfZLJeSOYwxfqDZOk0R8QMbjDEt\ne7mVboqI3AA8hX2xHgJcBYSAudksl5JRbgFeE5GLgIew28Kcgt0OIiHU2WqBMeahaKjnq7HTCd8F\nDjDGfJ/dkikZYgLwMnaNnsHuuQZwL3BytgqlZJTTsHW/oEX6ScB9GS+Nkg0GYOt6ELAJeA/4iUak\n6/HoaFbPYyvg30Af4HtgIbCHMWZDVkulZAxjzGIRORz4I3b7h8+BGcaYBxI9h+6zpSiKoiiKoiiK\nkgZ0zZaiKIqiKIqiKEoaUGdLURRFURRFURQlDaizpSiKoiiKoiiKkgbU2VIURVEURVEURUkD6mwp\niqIoiqIoiqKkgbxztkTkRyIyT0S+EpGIiByawDGTRGSJiARE5BMROTETZVUURVEURVEUpeeSd84W\n4MPufXUmCex5ISLbAP8BXsTuAH8b8HcR2T99RVQURVEURVEUpaeT1/tsiUgE+JkxZl4Hea4DDjLG\n7ByXNhfoZYyZmoFiKoqiKIqiKIrSA8nHka1k2QOY3yLtOWBiFsqiKIqiKIqiKEoPoSc4WwOBdS3S\n1gFlIlKYhfIoiqIoiqIoitIDcGe7AFlCop9tzqEUkT7AAcBqIJChMimKovR0dgHuBCYB/uwWJS/Y\nBbgL2Ae9X4qiKOnEC2wDPGeM2ZDMgT3B2foWGNAirT9QbYwJtnPMAcCctJZKURRFaY9Xs1
2APEPv\nl6IoSmaYBvw7mQN6grP1BnBQi7SfRNPbYzXArbfeytixYwEIh8MEg0E8Hg+O47Sblkxev9/PmjVr\nGDZsGD6fL6ljk9FOtU572pnQaUsjXToXXXQR1157bYfXko7rUVvLHVtraQPp0knkWtTWsmdribQF\n+dKuqa11ztbSbQPpuB61tdTaWiptIB3Xo7aWXltbtmwZ55xzDkR9hGTIO2dLRHzACJqmAg4XkbHA\nRmPMWhG5FhhsjIntpfU34KxoVMJ/AlOAo4COIhEGAMaOHcukSZMACIVCVFdXU1ZWhtvtbjctmbyV\nlZV4PB4mTJhARUVFUscmo51qnfa0M6HTlka6dHw+H3vuuWeH15KO61Fbyx1ba2kD6dJJ5FrU1rJn\na4m0BfnSrqmtdc7W0m0D6bgetbXU2loqbSAd16O2ll5biyPp5UX5GCBjArAUWIJdc3UT8A5wVfT/\nA4GhsczGmNXAwcB+2P25ZgK/NMa0jFCoKIqiKIqiKIqSMvJuZMsY8wodOInGmJPaOWZ8OsulKIqi\nKIqiKIoSTz6ObCmKoiiKoiiKouQ86mwpSjtMnjw520VQsozagAJqB4ragKI2oHQedbYUpR20YVXU\nBhRQO1DUBhS1AaXzqLOlKIqiKIqiKIqSBtTZUhRFURRFURRFSQN5F40wk4TDYUKhUOPv8Z/tpSWT\nNz4tWZ3OaqdCJxHtdOm0pZEpnVTWd7I6amtqa5nSUVtTW8uUjtqa2lqmdNTW1NZSpdMZ1NnqgGAw\nSHV1dbM0v9/fKl9baYnkDQQCjZ+d1UlEO106LdMyodORRqZ0OlvfqdBRW8ucjtqa2lqmdNTW1NYy\npaO2praWKZ3uZmvBYLDNcyVCXjpbInImcD52A+NlwNnGmLc7yH8OcBqwNbAeeAS4yBhT35GOx+Oh\nrKwMsB6t3+/H5/PhOE67acnkjUQiAHi93qR1ktFOtU572pnQaUsjUzpdre9U143amtpl+fAOAAAg\nAElEQVSa2pramtqa2pramtqa2lr6bc3j8dBZ8s7ZEpFjgJuAU4G3gJnAcyIy0hizvo38xwPXAr8A\n3gBGAvcCEazD1i6O4+B2uzuVlkjeWAV2RScR7XTptEzLhE5HGpnSSaV2Ku5ZpnTU1tq/7nTpqK21\nr622pramtqa2pramtpZpW+sM+RggYyZwpzHmPmPMx9gRq1rg5HbyTwQWGmMeNMZ8YYyZD8wFdstM\ncRVFURRFURRF6YnklbMlIgXAeODFWJoxxgDzsU5VW7wOjBeRXaPnGA5MBZ5Ob2kVRVEURVEURenJ\n5JWzBfQFHGBdi/R12PVbrTDGzAWuABaKSBD4FHjZGHNdOguqKIqiKIqidB+CQfjHP6C+wxX/itKc\n1hMZ8xMBTJv/EJkEXIydbvgWMAL4k4h8Y4z5Q0cnveSSS+jTpw8AxhhCoRDTpk1j+vTpqSy7oiiK\noiiKkuMsWeLmlFPgo4/gOu2y77Y88MADzJkzB7fbjYgAsGHDhk6fL9+crfVAGBjQIr0/rUe7YlwN\n3GeMuTv694ciUgLcCXTobM2aNYtJkyYBEAqFqK6ubhZRRVEURVEURekZbN5sP7/5JrvlUNLLscce\ny9SpUykrK2sMkLFgwQImT57cqfPl1TRCY0wDsASYEksT63JOwa7NaotibOTBeCLRQyUd5VQURVEU\nRVG6F7W19rWxC1HAlR5Ivo1sAdwM3CsiS2gK/V4M3AMgIvcBXxpjLo7mfwqYKSLvAm8C22NHu56M\nBtdQFEVRFEVRlA5RZ0vpDHnnbBljHhKRvliHaQDwLnCAMeb7aJatgFDcIb/HjmT9HhgCfA/MAy7d\nklY4HCYUCjX+Hv/ZXloyeePTktXprHYqdBLRTpdOWxqZ0kllfSero7amtpYpHbU1tbVM6aitqa1l\nSidV2ps32z76SCSittZDba0z5J2zBWCMuQO4o53/7d
vi75ij9ftkdYLBINXV1c3S/H5/q3xtpSWS\nNxAINH52VicR7XTptEzLhE5HGpnS6Wx9p0JHbS1zOmpramuZ0lFbU1vLlI7aWtdsrbravszX1jY0\n/l9tre207mZrwWCwzXMlQl46W5nC4/E0BsQIh8P4/X58Pl/jLtJtpSWTNxKxS8m8Xm/SOslop1qn\nPe1M6LSlkSmdrtZ3qutGbU1tTW1NbU1tTW1NbS1zttbQUACAMQX4fD61tR5ka54uzB1VZ6sDHMdp\njEKSbFoieWMV2BWdRLTTpdMyLRM6HWlkSieV2qm4Z5nSyWVbM6Zp3xO1tdToqK01/Z1uHbU1tbVM\n6aitdc3W6upiL+OutGirreW+rXWGvIpGqCiK0hbXXAMlJdbpUhRFUZR0UFtrPxsaslsOJb9QZ0tR\nlLxn7lz72YUp1YqiKIrSIYGAjUaozpaSDOpsKYqS98QCHdXV6dZ5iqIoSnqIdeips6UkgzpbiqLk\nPTFnq53AQ4qiKIrSZerrbYeezqJQkiEvnS0ROVNEPheROhFZJCK7biF/LxH5i4h8HT3mYxE5MFPl\nVRQlvcScrdiGk0pu8MYbcP312S6FoihKaogFYtKRLSUZWofoyHFE5BjgJuBU4C1gJvCciIw0xqxv\nI38BMB/4FjgC+BoYBlRlrNCKoqQVdbZykz33tJ/nnpvdciiKoqSC2MiWOltKMuSds4V1ru40xtwH\nICKnAQcDJwNt9aH+EigH9jDGxLZ//iITBVUUJTOos6UoiqKkG12zpXSGvHK2oqNU44FrYmnGGCMi\n84GJ7Rx2CPAGcIeIHAZ8D/wbuM4YE+lILxwOE4q+xYXD4Waf7aUlkzc+LVmdzmqnQicR7XTptKWR\nKZ1U1neyOmprHduaiAMItbWittZFnXRrJ5uWjuvRdq372Vp3bNfSoaO21jXtQMD+HQwatbUeamud\nIa+cLaAv4ADrWqSvA0a1c8xwYF/gX8BBwPbAHdHz/KEjsWAwSHV1dbM0fxsr8NtKSyRvIPqtDQQC\nndZJRDtdOi3TMqHTkUamdDpb36nQUVtrW0ekHBD8frW1VOmkwtZibNrkx+3uHraWDh21NW3XMqWj\nttZVWzPRz0jj/9XW2k7rbrYW7EJUlHxzttpDgPa2M3VhnbFTjTEGWCoiQ4Dz2YKz5fF4KCsrA6xH\n6/f78fl8jbtIt5WWTN5IxA6seb3epHWS0U61TnvamdBpSyNTOl2t71TXjdpak45ILEKUqK3lkK3F\ncLl8QPewtUzZQKZ0uoutdcd2TW0t92ytocEV/duFz+dTW+tBtubxeOgs+eZsrQfCwIAW6f1pPdoV\n4xsgGHW0YiwHBoqI2xgTauc4Lr/8cvr06QOAMYZQKMS0adOYPn16s3yO4+B2t76VbaXHpzmOgx2V\n7Dhfsmlt6XT1nImkZUKnI41M6aRSOxX3LFM6uWxrsRkK4bDaWqp0UmFrMYJBB6+3e9haOnTU1rRd\ny5SO2lrXbK0pQIakRVttLTds7YEHHmDOnDm43e7GztwNGza0Ok+itC5BDmOMaRCRJcAUYB6A2Lsw\nBfhTO4e9BhzXIm0U8E1HjhbArFmzmDRpEgChUIjq6upm3nkqOO20idTVFbOuPVdRUZQtEnO2Qh1+\no5VsUVsLcQNdiqIoeUkwKDgOdGH5jpIHHHvssUydOpWysrJGB2zBggVMnjy5U+fLx322bgZOFZET\nRGQ08DegGLgHQETuE5Fr4vL/FegjIreJyPYicjBwEfDnDJe7TdasKeG77/KxGhQld4hNpQ6HNRph\nrhA/l6CuLnvlUBRFSRX19VBcrM6Wkhx5NbIFYIx5SET6AldjpxO+CxxgjPk+mmUrIBSX/0sR+Qlw\nC7AM+Cr6u261qSjdhNhGk/oAzB3iQyPX1mavHIqiKKmivl4oLdVZFEpy5J2zBWCMuQMbUbCt/+3b\nRtqbwJ7pLpeiKJ
knHG56sdcHYO4QF4xQR7YURekW1NfDwIGwcWO2S6LkEzp/LYvoi6GidJ3YqBbo\nyFYuEQg0TekMhXR6p6Io+U04LITDotMIlaRRZyuL6NQaRek68SMoumYrd4h3tvTFRFG2zKpVLr75\nRiPJ5CrBoG3T1NlSkiUvpxF2F+rq9MVQUbpKvLOlo8W5Q/zUQX0xUZQtM2FCL+BHbNxYme2iKG0Q\n22PL59M2TUkOdbY6wO/3U1lpG71wOEwgECASiTTG9G8rLZm869fXAuUAbNxYiUjixyajXVNT0+wz\nmXMmo50JnbY00q2zbJnDsmUO06bVdam+U103mdLJdVvbsMEF9AKgrq6BqqravLW1jjQypZMqW9u4\nsbaxXjZt2kxtbW3e21ombSBTOt3B1rpLuwYVaddRW+u8rcWcLY8nSChUQFVVVd7amrZryev4/X46\nizpbHbBmzZou7Ri95fP7gCEAvP76OxQWRtKmBbBixYq0nj+TOpm8ljPO2J3PPvMxYMBi+vQJpk0n\nE3RHHfs9svFv1q1bz9Klq1KukQm6m84nn6wFBgHw6aer6Nt3fVp0utN9607XojqdYf8M6XSne5Y5\nnWDQTvEMBDYSDg9k6dKladPqTvetu1zLmjVrOn2sOlsdMGzYMCZMmAA0ebler7eV5xuflkze+riV\n/WPGjKd3b5Pwsclo19TUsGLFCkaNGkVpaWlS50xGOxM6bWmkW2fAgCI++wxKSsYyZszmTtd3qusm\nUzq5bmsFBeWN6RUV/Rg3rlfe2lpH9yzfbK1//2GNxw4bth1jxvTLe1vrTu1ad7K17tKuxVBby01b\n++KLtQAMGWJHIMeOHUcwmJ+2pu1a8jrBYOc729XZ6gCfz0dFhf1ShUIhqqurm+0m3VZaMnldrpo4\nrXIqKhI/NhntGKWlpUlfTzLamdSJ10i3jstVAEBBQSnFxZFO13eq6yZTOrluax5PWePfLlcB5eXF\neWtrHd2zTOmkytYcp7jxd6/XR3FxKO9trTu1a93J1rpDuxa/CbjaWm7aWmwaYUVFYVSznLq6/LO1\nTOt0F1vz+Xyt7l+i5G00QhE5U0Q+F5E6EVkkIrsmeNyxIhIRkcfSXcYtEb+AXBf25y6R6OzOQKDj\nfEp2iNWL4xj9HuUQ8QGAwrqYXFE6JD46sX5fcpNg0L4yF0f7kbSelETJS2dLRI4BbgKuAMYBy4Dn\nRKTvFo4bBtwAvJr2QiZA/MuIviTmLrEeR92YNTeJOVsaISq30NDvucujjxZw9dXZLoUSz+bNTb9r\nx15uEh+NELRdUxInKWdLRG4UkdHpKkwSzATuNMbcZ4z5GDgNqAVObu8AEXEB/wIuBz7PSCm3QKDZ\n/kDZK4fSMbG60QdgbhKrl5IS0H22cgcN/Z67/OpXJVxxRbZLocSjm7PnPjFnS0e2lGRJdmTrZ8CH\nIvK6iJwsIp2fwNhJRKQAGA+8GEszxhhgPjCxg0OvAL4zxtyd3hImjo5s5QexulFnKzeJd7b0e5Q7\n1NcLsWCuWi+K0jEB3Zw959FphEpnScrZMsaMACYDnwC3Ad+KyD9FZM90FK4d+gIOsK5F+jpgYFsH\niMhewEnAKektWnKos5UfxHrodRphbhKrl6IiffjlEoGATrfJdRoasl0CJUZAN2fPeYJBG5UuFlhP\n2zUlUZKORmiMeRV4VUTOBI7BOjELReQT4O/A/caYlo5QJhDAtEoUKQHuB35ljElqW/ZLLrmEPn36\nAGCMIRQKMW3aNKZPn56K8mqAjDwhto9d/DQPJXcIBMDjsT/68Msd6uqE4mKoqtJ6yVW++w4GDMh2\nKRRQZysfqK+34xPqbHV/HnjgAebMmYPb7UbEDoxs2LCh0+frdOh3Y4wf+CfwTxEZgXW6LgZmAYWd\nLtGWWQ+EgZaPiP60Hu0C2A4YBjwlsTsWHdETkSAwyhjT5hquWbNmMWnSJKB5GMhU
oQvI84NYlCid\nRpibBALg9YLbDaGQTr/JFerrpbFetH3LTWIdSUr20TXcuU8w6OA4hsJC+5xRp7j7cuyxxzJ16tRm\nod8XLFjA5MmTO3W+TjtbMaLrtn4E7AOUA2ndwtkY0yAiS4ApwLxoGST695/aOGQ58IMWabOAEuA3\nwNr2tMLhMKHotykcbf3Cca1gW2nJ5PX7mwbiAoEQoVDix3ZWO9nr6ax2unTa0ki3Tm2tAaSxvjpb\n36mum0zp5Lqt1daGKSx04XIZwuH8trWO7lmmdFKlXVtr8HoNjgOhUKTT5U7H9fT0di1GIBDqFrbW\nHdo1v1+wKyQgGIx0G1vrTu1aIOCiqMgAEcAhGMxPW8uUTne1tc7QaWdLRPbGRv87CjuF72Hgt8aY\n1zpdmsS5Gbg36nS9hY1OWAzcEy3bfcCXxpiLjTFB4KMWZa/CxtVY3pFIMBikurq6WZq/ja7AttIS\nyev3F+ByGSIRobq6lurqUMLHJqMdiHaZBQKBTl9PImmZ0OlIIx06dXWBxpGtmppQu8d2Vacr9yxT\nOrlqa5s2BSks9CASIRzOX1tL9J5lSqertub3hykoCOM4DrW1wU6Xe0s62q4lrxNj0yY/fn+43WO7\nqqPtWuJplZUFgJ2fVlvb+XePXLO17tSuBYMOXq8hEPADZWzeXEefPvlna5nS6W62FgwG2zxXIiTl\nbInIIOBE7JTB7YE3gXOBB4wxmzs6NpUYYx6K7ql1NXY64bvAAcaY76NZtgK6PMDr8Xgapw2Gw2H8\nfj8+nw/HcdpNSyZvQ0OIoqIQfn8BhYXFlJUlfmwy2pHorrxerzfp60lGOxM6bWmkUwe8GBObmlYA\n0On6TnXdZEon123NmEJ8PsHjEUKhSN7a2pbuWb7ZWijkpqTEwXHAcQqB+ry3te7Trlk8Hh8+X/7b\nWndo11yupinQjlNIWVlpWm1A27Xkba2+PkhREZSV2cg/hYVFQP7ZWqZ0uputeWLhdTtBsiNbXwAb\nsQEn/rGlkaF0Yoy5A7ijnf/tu4VjT0pEw3GcxrmayaYlkjcQCFNUFMbvLwDcxGdNpXbMULpyzkTS\nMqHTkUY6dILBpjyx9UCp1E7FPcuUTq7aWjDowuuFggJDOCw4jisvbS2V9Z0Kna7aWiDgorhYcBwa\nOyzy3da6S7vWhJtYUj7bWndo1+IjQxqTmjYs2WvJlE6+2lp9vUNREXg8VtOY1Gtru5b7ttYZWpeg\nY44B5hljdFlgCqirE3y+Btav14WWuUpsCiFoHeUqdXXxATKyXRolRiBgw/G73dCFqe5KGtHvS+6g\n+2zlPsGgXbMVe+fWdk1JlKScLWPMY/F/i0h/bBRAV4t873W9aN2fQEAoKrJPO33o5Sa1tboXWq4T\ni0boONCFKdVKigkEhIoKWy/6UpI7xM8k1H22cgcN/Z771Nc7jR17oO2akjjJjmwBICLjgXuBMdjg\nGGD3uIrtddX5sbYexP+zd95hUhRbG/9Vd0/a2Z0NZMWLAUFQRARBxKsiKqiIelUMGMGcI14v1/wh\nhosRM3pFRRFRTIB4RURURFAEQQQxIIJkltmdndhd3x+1M7O7bJiZDcxiv8+zz+zUVPdb4XR1nTqn\nTgWD4PHEI57s5MLYqBbxg6fz8+0XYLaiYuh3+znKHqjFJKVs2SH5swfxs4LAHtOyCbaylf0Ih23L\nlo3MkJGyhTpfayUwAnW21Q6HCduoG+rQT9uylc2IHzxtK1vZi4ruavakPnsQ7xfbspVdiESSa6H2\nmJY9CIeT/9vPS3YiHNYpKrKVLRvpI1Nla2/gNCnlqoYszF8NwaCgqEg9rfZLLzsRdyP0+WyXm2xF\nKAR5KnCX/fLLIgSDwla2shDhcFLZsse07IG9Zyv7EQ/GZCtbNtKFVneWajEL6N6QBfkrIhTCtmxl\nOeLKlm3Zyl5UDJBhv/yyB+Gw3S/ZCNuNMDsR
CoHTqZyE7H7JTqhohEnLlt1PNlJFppati1GHCh8A\nLAUqrY9JKd+rb8H+ClB7Guw9W9mM+J4tn88eWLMV8T1boZDtRphNqLhnyx7fsgcVLVv2mJY9CIXA\n65VEIsLulyxF1T1bVY6ts2GjRmSqbPUF+gHHV/NbkwTIEEJcBdwMtAUWA9dIKRfUkPdi4HzggPKk\nb4B/1ZQ/DtM0iZWPeqZpVvqsKS2dvGVlWsKyFQ6bxGIy5Wsz5U63PplyNxZPdRyNyRMIqNE0N9di\n0yZ2yNcY9WkMWWvovmkqnlS4QyGJ0ymJRCSm2Xxlra42ayqehuIOBsHlMtF1jWjUyrjcjVGfv/K4\nVtGyFQqZu4Ss7QrjWlmZhtcr2bYNolFrl5C1qjzNXdbCYScul4U6/cggErHHtXTr0lQ8jSlrmSBT\nZesJ4FXgXinlhozZM4QQ4kxgLHAp8DVwAzBTCNFJSrm5mkuOBF4DvgRCwD+Bj4QQXaWUf9bEE4lE\n8Pv9ldICgcAO+apLSyVvKFSYsGyVlgbx+yMpX5sOd6jcGTwUCmVcn1TSmoKnNo7G4PH7TXJyJBAl\nEtFqvLa+PPVps6biyVZZKyuz0LQIUgpM02i2spZqmzUVT/1lTSBlEHARCsUyLnfdPPa4lg5PRctW\nSUmQQCBS47X14Yl/2uNaamklJV5ycjRAJxiM7hKyVhNPc5W1cNiLwxElGIwABQQCoYzLXRtP/NMe\n1+rP05CyFqnH2TKZKlstgEd2hqJVjhuAZ6WULwMIIS4HTgSGAw9WzSylPK/i93JL12nAAJTSWC2c\nTic+nw9QGm0gEMDr9SZOka4uLdW8lgXhsMDlMtE0iWF48PncKfOkw22V27rdbnfa9UmHuyl4quNo\nTJ5IxEFODng8joTLQCb93Rh901Q82S5rkYhGfr6TSEQSi9FsZa2uNmtOsmaaEI0KCgvdOJ0amqbc\nO5u7rO0K41pFy5bD4cHrdTRrWWvIvmkqnurSpNTwetXiq6Y58flymr2spdJmzUnWIhGd3FyLggLV\nNw6HGyhtdrLWVDy7mqw5nU4yRabK1ttAf+DnjJkzhBDCAfQE7ounSSmlEOJjlHtjKvACDmBrbZl0\nXccwjIzS6sobV5ZdLqv8gDydilkbkjsuKPW5ZyppTcFTG0dj8ITDGjk5ApdLEIvJBuduiDZrKp5s\nlTV1hIKOYViYptVsZa0h+7sheOoja3ErcG6uGtcsSzQ4tz2uZcZTMfS7Zenouj2uZYOshcOQm6uU\nLcvSdglZS+fapuKpj6wl5wPxfA3PbY9r2S9rmWDHEqSGlcAYIcThwPfsGCDj8YxLVDdaoiS8qlVt\nA9A5xXs8AKwFPm7AcqWF+PlNLpeJYdgblbMVZWWCnBzsPspiVDzUWAXIsI/929mIT+jtABnZh7hl\nS9PsMS2bEA+QAXbghWyElDtGI7THNRupIlNl62KgFLUX6sgqv0mgMZWtmpDSLEsI8U9gKHCklLJW\nB8xRo0bRokULAKSUxGIxhg0bxrnnnlvvwiYtW6b90stiVFS27DNpsg9SVla27JdfdiBu2YqfSWOP\nb9mDcFjHMGS5tX5nl8ZGHKEQtG6t/rejqmYfIhGQMhlhFez3za6MSZMmMXHiRAzDQAj1PG7ZsiXj\n+2WkbEkp98qYsf7YDJhAmyrprdnR2lUJQoibgZHAACnlsrqIRo8ezVFHHQVALBbD7/dX8jutD+J7\n+LzeGIYhsQ8xzE4EAurAXIfDHlizEdGoUrhsC0p2wbZsZS/CYT2hBNsLSNmDYDBp2bKV4OxDKKTm\naG63bdn6K+Css87ihBNOwOfzJdwIP/30U/r375/R/dI61FgIMVcIcbMQYt+M2BoAUsooKnT7gArl\nEuXfv6zpOiHELcAoYKCUclFjl7MuJJUt240wm1FSIvD5bMtWtqI8CFEVN0IbOxtxVzWPh/I9Wzu5\nQDYSUPtO
JA6H/d7JJqiFPVvZylaUlalP243QRiZIS9kCxqOCUHwrhFguhHhACNFPxG1sTYeHgUuF\nEOcLIfYDngFygJcAhBAvCyESATSEECOBe1HRCn8XQrQp//M2cbkTiCtbOTkxW9nKYsSVLXtikp1I\nrjaCrkv75ZclqOpGaPdL9kBZtqS9gJRlKC1Vli1dt+zFiSxE/F1T0Y3QnhPYSBVpuRFKKScAE4QQ\nLpQl6WTgTcAQQnwAvAd8JKUsa/CSVi7HZCFES+AelDvhdyiLVfmxs7QHKj4GV6CiD06pcqu7y+/R\n5KiobNl7trIXpaVJy5bdR9mHcDipbNl7trIHthth9iIc1vB4lLXRHtOyB4FAXNmStoU+CxEPama7\nEdrIBJnu2QoD08v/LhNC9AGGoKxHrwkhPgHGSCm/aLCS7liGp4Cnavjt6Crfd+Yes2rh94MQErfb\nLN+ztbNLZKM62G6E2Y0d3Qh3bnlsKFR0I7QDZGQX4hHVQiF7sphNUJYt0DRpPy9ZiOosW/bzYyNV\nZBqNsBKklPOB+cAoIcQ+KMWrXUPce2fCNE1i5aOeWf5UmRWerurSUs1bXCzIzRVomnpwIxGLWMxK\nmSdT7nTrkyl3Y/FUx9GYPCUlgtxcs3zCqNVZ74aoT0PLWn15slnWysqUv43DEUMIME2t2cpaXW3W\nVDwNwR1XtgwjhqZpiTPqmrOs7SrjWiSila/OS6JR2exlLVOebJK1SASiUQO320z0y64ga1V5mrOs\nlZaqd43TGUM5ThlEo1bG5W6M+vyVx7VU6tJQPJmgQZQtIYQzHkZdSvkz8EhD3HdnIxKJ4I/7+5Uj\nEI/ZXkdaXXk3bfKQm+sAQNMsyspi+P3BtHlS4Q6VL/+HQqGM65NKWlPw1MbR0DxSgt8vcDiCRCKC\nSMRT47X14Yl/NpasNRRPNspaSYkyN5pmANM0MM2cZilrNfE0V1kLh9XSr2n6kTKXSCTzctfGE/+0\nx7XUeUIhB06niRAawWCEQCBY47X14Yl/2uNa3WnFxQIoxOmMoOsuQqEYfn+4wXnqqktT8TRHWfP7\n1URb08KUlvqBIoLBSMblrindHteyV9YikVpPi6oVaSlbQoihwDtxxUoIcTVwC9BeCLENeFxKuVP2\nQDUGnE5nItS7aZoEAgG8Xm/iFOnq0lLNGw5r+HzxVXmBprnw+Rwp86TDbZXvtnW73WnXJx3upuCp\njqOxeIJBnUhE0L69m40bIb6okUl/N0bfNBVPNsuaZbkAaNHCi9er3G+ao6yl0mbNSdbiylbr1j7c\nbi2x36E5y9quM65B27YCp1ND1114vVqzlrWG7JudJWvbtyvuggIDXZcIYeDzeZu9rKXSZs1H1lQo\ngvx8NS/UdYlhOIFws5K1OHa1ca0pZM3pdJIp0rVsvY5yD9wohLgIeAh4EOVC2AO4TQixTko5PuMS\nZRF0XU/E1083ra68paWQl6cExDBASg3D0FK6Nl3uuKDU556ppDUFT20cDc2zfbt6sNq2VS/DuCtU\nQ3I3RJs1FU82ylo86l1uroFhmJhm85S1dK5tKp76yFoyvLhRHvrdanBue1zLjKe0VKOwEHRdYFmi\nUbjtcS29tPhiRF6eQNMklqXtErKWzrVNxZMpd3wBKTdX9Y2ug2VpDc5tj2vZL2uZYMcS1I6KIXIu\nB+6QUj5U/n26EGIrcCUqRLyNWuD3J8/UMAx7A3k2orhYuXm2agW//ALRqEDKnVwoG5VQNRqhZQk7\nbHIWIBRSQRhAoOtgH9qePQgEDAoKZHm/7OzS2ADYulV9FhXFoxHu3PLY2BHxYEwu5UxhPz820kK6\n52wBxKebewMfVfntI6BjvUr0F0FFZcsO/Z6diFu2WrZUE3mwD2fNNsRfgPHDc8F+AWYD4hHvwF5M\nyjaUlhrk59vKVjYhrmwVFiply+6X7EM4LHA4LLTyWbP9/NhIB5koW4OEEE
OAEOog4YpwkVTGGhVC\niKuEEL8KIYJCiK+EEIfUkf+M8oOYg0KIxUKI45uinDWhpARyc+3JSDYjEFCz94KC5ETeDv+eXYiH\n43W51MsP7BdgNiAc1skpfzvYk5LsgZQQCDhsZSvLsGWL+lQWR2lbgrMQ4TA4HMnVVvv5sZEOMlG2\nJgDvALsDR1f57VDg5/oWqi4IIc4ExgJ3ovaKLQZmlh90XF3+vsBrwPPAQajyvw2rPHoAACAASURB\nVCOE6NrYZa0Jfn9FZcteycpGhEI6Qkg8HnAoj0JbKc4yhMNKEY7/gd1H2YD4ni2wJyXZhFAIolGN\n/HyJYdj9ki3YuhXy8tR7xj5nKzsRt2zFYY9rNtJBusrWgVJKrcLf6Cq/bwRua6Cy1YYbgGellC9L\nKX9E7R8rA4bXkP86YIaU8mEp5Qop5Z3At8DVTVDWahEoPy0ebMtWtiIU0vF6QYiKLmr2imM2IRQS\nuN3qf9uNMHuQ3LNlT0qyCdu3q/ErP99C1+33TrZg61YoKlL/226E2Ymqli17scJGOkhX2VpS7rJ3\niRAit+qPUsoPpJQzG6hs1UII4QB6ArMq8ErgY6BvDZf1Lf+9ImbWkr/REQiQcLOx92xlJ4JBPbE6\nH7ds2W6E2YVwWOBRx58l3AjtZ2nnQ+3ZUv/bylb2IK5s+Xy2G2E2YetWaNFC/W8HyMhOhMMCp9O2\nbNnIDOkqW0cCP6Bc+NYLIV4SQvy94YtVK1oCOrChSvoGoG0N17RNM3+jQylbScuW/dBmH9S+k2Qf\ngT2RzzaEQuxg2bL7aOejohuhPb5lD5KWLVvZyibsaNmyPSiyDdXv2bL7yUZqSCv0u5RyLjBXCHEN\nMBS4EJgjhFgFvAC8LKX8s8FLmRoE6QXnqDN/IBBg27ZtgDrgLBQKYVlWItZ+dWmp5BVCp6ysEF0v\nP1yDKMGgZNu2QMo86XCXlJRU+kznnulwNwVPdRyNxaNcoUy2bfMTDBpAHiUlIYqLzbT6O936NKSs\nNZYMNBVPXbLm90dxOp3lfaQB+Wzb5icnRzRK3zRGfVJts6biaQhZC4VyMYwo27YFiEY9RCIGZWVl\nzVrWdoVxbf36KODDMEqxrByCQUlxsb9Zy1pD9k1T8VRN27Ahl8JCSUlJCZrmIxiMsm1bWaPIgD2u\nZcZdWmrgdMoEjxA+AoGwPa79hWQtEAiQKdI9ZwsAKWUA+C/wXyFER+Ai4CrgXiHEh1LKIRmXqG5s\nBkygTZX01uxovYpjfZr5Abjtttvwer2V0vr370///v1TLmx1UJPCARQXryv/XoLfr7Fw4Xf1um9d\nWLFiRaPevyl5moIjFOqKEGUsXLiQX34pBHqxYsXP+P3BOq9NF7tS3zQlz/r1xViWYOHChfz2WxHQ\nk8WLl/Hnn6EG49jV2qwpeMLh3oTD21i4cDmbNu1LWVkrli9f3ihcu1K7NTbHihVtgdasX/8jgYCL\naNRk0aIljci36/RNY/KsXduHvLztrFixAl3vxdatfhYuXNYoXHE09zZrap5Nm7ricHgTPLFYP9at\n28Dy5asahW9Xabem4mhontmzZzN79uxKaU2ubFWElHKVEGIMsBoYA5xY33vWwRcVQnwDDADeAxBC\niPLvj9dw2bxqfj+2PL1GjBkzhsMOOwxIarlut3sHzbdiWip5t25Vzb7PPm2B9eTn5xEMOujVq1fK\nPOlwl5SUsGLFCjp37kxeXl5a90yHuyl4quNoLJ5gUKeoyEWvXr2IRFSf7blnR7p2NdLq73Tr05Cy\n1lgykC2ylpNTRGGhm169elFaqryiO3fen332EfXmaUpZS6XNmpOshUI67dr56NWrF9Onu3E4nHTp\n0qVZy9quMK4tWCDRNIvu3felqMiHpkGPHj2ataztCuNaNOqlY0cHnTt3RtclXm8+vXr1ataytquN\na263E6czlODJyXHRqlUbunRxNCtZi2
NXGtcaQ9Z69OjBlVdeWSntyy+/ZPDgwWSCeilbQogjUBEA\nTwMsYDLKnbCx8TAwoVzp+hoVnTAHeKm8XC8Df0gp/1We/zGUu+ONwDTgbFSQjUtqI/F6vRQWFgIQ\ni8Xw+/18/XU+a9fqjBiRTPP5fBhGsimrS6+YVlKi0lq0UJtN3G6DcNhBYWFhndfWllZbOkBeXt4O\n9cmEpzaOpuKpyNFYPKFQjJYtdQoLCxP+9IbhoaDAm1Z/p1uf+vZ3U8hAU/HUJWuW5SA316CwsJD8\nfLVZy+v1UVjYvGQtlTZrKp6GkLVwWCM/X6ew0IfXC1JKcnJymrWs7QrjmmWV4XZb+Hx5uN1OYjEo\nKCho1rLWkH3TVDxV00IhKCrykJeXh6ZZaJqDwkJvo8hAdXVpKp7mPK5ZVgSHI5jgcTjAMNz2uPYX\nkrWqnm7pIG1lSwixO3ABar9WR+BL4Fpgcrl7YaNDSjm5/Eyte1Dugd8BA6WUm8qztAdiFfLPE0Kc\nDYwu//sJOFlK+UM6vIsW6Rx/vNJwBw6EthmG14hbIpMbyO3oQ9kIFfrdDpCRzagY+j2+eGX30c6H\nikaoNpPbR1tkD4JBgculomLoutr0b2PnIxSiQvROez6QjVDRCE1UfDY7GqGN9JCWsiWEmAEcg9o3\n9TLwopSyaZwxq0BK+RTwVA2/VT1sGSnlW8BbqTOczOeftyQQgGOPBSlh+PBcPB5JMCj45BM455zM\nyh5XtrxeCAbVQ2sPrtmHYNBIhOe3Q79nJ0J2NMKshFK2kpN6e1KSHQgGSShbdpTI7EEwWFnZsvsl\n+6CiESZjqtmLSDbSQbqh36PA6UB7KeWtO0vRahq8w+23H8DgwTB+PBxzjM7vv+vMnm3SqRNccglc\nf326zadQ1bJlK1vZiVBIs0O/Zzmqs2zZE5WdCykrPzu2spU9UJYtZXG03zvZAdOESCS5aKRptmWr\nofHJJ1AlIF7aCIdFNaHf61kwG38ZpKUtSCmHSCnflVL+BUSsJe+88wWHHAJXXQWffy54+ulSevaE\nY45Rg+OTT2o88YQbmU7AeapzI7Qf2mxEdW6E9rka2YVw2LZsZRuiUbAszT7UOAtR1Y3Q7pedj7gr\nZ2XLlv2eaSj8738wYIBaHDfNzGU+HKbSocYOh+3pYiN1ZGaa+UtgC/n5UV54AY4+Gm680WLo0AgA\nDzwAp56qct11Vw5nnqmxbh3ceKNSwupCVWXL4ZApXWejaREKGQlly3YjzE6EwyIxSUkqxDuvPDbU\nhB7A47EXk7INoVBywmgrW9mBYPlJIvaerYaHacIdd6j/X35Zo3XrIu64I7Npb1XLltutnqfmAMuC\nBQtg82ZYs0bNY0aPdrN2bQ5+P1x3Hfj9Ku+sWQ7GjROYJixbBmPHCkaM8LJ48c6tQ3NH2gEy/mro\n1g1mzYJYzEoIY24uvP02/PFHjH331Zk6VSMSgWnToE8fOO202u9Zps4qTOwHcjrtjcrZiFBIJydH\naVe21SQ7YQfIyD4kxzfbTTrbUFYmcLvVJn9b2coOxJWtim6E9qJe/REMQr9+Oj/+CBdfDK+9Jikr\nEzz3nODKK2HFCjjuuNTvpyxbyQfG42k+ytYdd+Tw9NPJ6f6gQfDhhx46dOjOo4+6WbwYXn8d7rhD\nMGZMDuvW6dx9NxQXgwoIovPpp5Lu3eGVV6Bdu51Vk+YL27JVD7RtC0uWFKPrkmnTVNqzz0LXrjqP\nP550L7zwQnjqqaRbQCCgBtb45NDlkraylWWIxSAa1SpYH+PptntHNqG6ABn2BHLnIm7Zqhhcxla2\nsgMV3Qhti2N2ID5hty1bDYOSEjjxRI0xYzz8+KNgzz3hvvvg119NRo8OUFys0gYOhK+/VntMS0rg\n7bdrf7cry1Zyz4jHk1xYylZs2AA9e+o8/bS7UvqHH6rP1atzWbxYvTg3bYJrrtFZt05NTJWiBQMG\nWJ
x+epjiYsGcOfD3v5P21hkbtmWrVpimSax81DPL30pmhbeTaZq0aCE5+mjJ//6nHlR14LTg7rtz\nGDdO8sQTJhMm6EyYoHPWWeqakhKB16sl7uVwWITDkljMrJEnlbRU8tZVn4bibiye6jgag6ekRLkL\neDwVeQxisfTbPN36NGR/NwRPNsta3C0qFrMQwgQMIhGrwbkbU9ZSbbOm4qkvd2mpenZcLsWj64JI\nREPK5i1ru8K4FgxKnE4L0zQRwiIaFc1a1naFcU0FbjAwjBimaSaiETY3WQuFwDBMfvoJli4VnHhi\n045rq1dLXnvNZNUqwUcfaXz0kQchJEuWmHg8Kt+xx0YZNSrJ26cPtGypc8wxXiZN0hHC5KyzNObP\nt5g928XIkQbbt8fIyVFnBzqdZqI+LpfG5s2kVcamlrUZMwRLlijl6d13ozz8sM6JJ0refFPjwQeL\nueQSnW7dXJxzjsGoURorV6p57JNPRgkGNS6/XGIYJhs3Bjj+eI3vv9d59FENp9Pgvvtc3Hhjdoxr\nqbZZQ/FkAlvZqgWRSAR/3HewHIHAjkeJtW4dBVycdlqYt95yccopYd55x8WWLYLzz08aD7/80uCI\nIwJs3erB43ESKl/S0rQYwaCsxFUdT6ppVdPjPKFQKKX61JW2Zo1Gy5YwcqTkkktK2Xdfq1F40q1L\nQ/Js2aJMjQ5HBL8/Vr5aX0gsllqbp5rWEG3WVDwNLWtr1wpKS4N07mzVmq+6tCQPQAi/P0QkogEu\nAoEQfn+s1utTSWsqWUu3bxqTZ9YsB2vWaFx4Yebc27ertte0EH6/hWk6kTIX08zuca2mtF1pXAsE\ncvH5TEIhE9OMEI0aid/tca3peQKBAJs360A+lhUgFAqh6xCJSPz+kpSuTzetoWVt7VqNP/7QuPji\nfIqLNTweyZYtGo8+qtGihZuVK6O8/HKMUaPKiJ8JWx9ZC4dh/foynE51RqlhqPpceqmXBQt0OnUy\nadPGYsMGjYICSTTqT7hl7rMPjBwZpG1biy1bBKNH57B5s2DSJBcAQ4cqxWTUKPjoI1XYpUtL2W8/\ni3C4AIdDJtrNMHIIBAykhEWLgnTqlP57rLr0hpS1L7/MAXQuvDBEv35lHH64+n3ECCgrK+Opp5bT\npUsXcnJymDcPXn3VyYwZTv7xj9LE9pZwWB1TdMopJRx6qODRR9Whwf/3fzlcdtm2jOuSSX0y4WnI\ncS1Sj+AKzU7ZEkIUAuOAwYCFOjvrupoOVC7PfzdwHLAH6oywd4DbpZQ79n4FOJ1OfD4foDTaQCCA\n1+tFL/f/i6cddZTGxIkwbpzO6NEx2reHq68O8dtvTj79VEPTJIYBy5frHH+8i1jMQV6ewF3u/+T1\n6kQiAl33cfLJGgMHBrnxRgNd19m+HRYtsjj44NJquSum1ZRuWWoQcLvdKdWntrTvvoNDDlGBIwIB\nwQsvuHnjDZMPPhB07erko48OYOJEBz6fjy++gJtv1hg7toQ+fdxp8aRTl/res3oedd/8fAOfLxdR\n7mEQi4mU2rw+9Un12qbiaSxZO+44B+vXa4wbZ/Lnn4Lbb4+mzB3nCYc1Cgpc+HxO8vLUqpNhePD5\nRDOStdTarCl4hg5Vr4R99zWZPNnLf/8rM+BWQ3FBgRo/8/NVX0QikJ+/c2StPnLeFDxNJWvxiGpu\ndw4ejxNQ49nOkLV0+7u5jGuZprVs6cXtjqJpQSxLy3pZ27zZy5o1Bscck2w7UPsCd99dcv31RcDf\nadXKYtMmjU6dHLzzjuDEE4P8/e8OOnXSeOQRjU6dJGedFavEU1YGU6ZIdtstyNixuTz4oMXdd2vc\neGOMoUMNNmzQ6NhRsmqVoF27fIqKvCxbphSmlSt17r47hhAh+vY1dmjHe+7R0HUHmzfD6NFUi48+\ncib+37YtD59PJg41jrebz6exfLng7rs9PPGEh6++itGzZ3bJ2rff
6pxzjsnYsWUpjWuXXWZy7rk1\nzzU7ddJZvjzGyy/DmDEGW7Z42WuvnT+uNdU71OlMykW6aHbKFvAa0AYYADiBl4BngXNryL8b0A64\nEVgOdCjP3w4YWhuRrusYhlFn2gUXCM45B3JyDNq2VfsTxo71k5dn8N//auTlCe6/X7JihY6u6wSD\nGl4viQ70eAThsOC99wzmzIE5c7w884zkyScF118PP/8Mq1eDz6fzxRcGXi90764UAl3XiUYN3G4S\nCkHVcsZ5Uq1PbWlz56rvgUCS7Mwz44KcC+TSq5cFaPTuDQsXwvvvOznssPpz11UXKUEIHV03ytsm\nc55QSH36fBqGYST2BcVi1V/bGPVJNa2peFLhToXHstTm5fXrldX36qvVNVddpeNypcZdcfLo9eoY\nhgo0AyClhmHotV6fSlptdcn0nuny7CxZGzJEvbDuukutBqfDEwopntxc9ezE96JEo6LJZa0h0pqC\np6lkLRQycbtNdF3H4VChsBuD+684rmWaFo2qtNxctcAadyPMZlmLRKBzZ1fi+0EHSUpLLW65Bdq0\n0XE6BSecoH7btEmN8zfdpHjnzlUWo6Ii2LpV5Vm7FhYs8DJwoMGdd+o4nfDHHwBqs/TBB6t7vPtu\ncrK7apWag/z5p86ffxZWKu+xx0LnziF8Pl+NdWnbVu25mj7dxOMJsHKllxtu0HnrLRXkzDAkUsLq\n1SqYTCSiIkfHr8/NVe+xJ55QA9zMmQZ9+qTXjqDukZfX8LIWjRosXgwXXFB9vkx59tsPrrsuxvjx\nFmef7WT2bMHVV8PUqTpLlgTw+Xb9d2gmaFbKlhBiP2Ag0FNKuag87RpgmhDiZinl+qrXSCmXAWdU\nSPpVCDEKeEUIoUkprarXpF+u5GbwqumXXqr+nzxZ8vvvqqMCgcr5nU4wTfjgg2Ta778LTjop+X3c\nOA9lZRpPPqm+d+qks3ZtIS+9ZDF0KHTsCA8/DIcfDtu3CyosIjQovvgCOnWSXH55Gd26uTn2WFWn\nbt3g++9VnnXr1MD4zjvq+/z5STHbtAkKChqnbOPHu/jnPw3at4f16+HJJwVz5ng591zB8cend6+4\nMrljgIyGLHHj48Yb1aHcc+ZoPPZYF+69V0PTID9/55Qnfi7WWWftGJtnxgzBjz96uOUWaNWq7nvF\nYgLTFDsEyGgufVRWBvffD5dfDsXFjp2+6diqZiT86CO44or07hPfNB4P/R5XgpvD8RaWpcaooiI1\nCWqssSodmCasWgWdO1deUMsEFQ81Nozm86zsyqgu9Hu2n7O1YIFR5buJ3+8vV25UGPHevWMcddSP\nFBTsjWnmcPvtle8RV7QARo3SAT0xZwDYfXfJ2rXVt8OZZ1r4fBojRsDChQEKC79h/fqenHuul2+/\nhUMOSYYyrw0eD5x8ssTvj3HUUZL27dWxPvPmxRCilKuvzmfuXBg+XOV3OMxK11bE+++rsWPkSBLu\nkiUlaq734IOCIUPUu6qkRB2y/M03cMYZcPDBRYwaZbFpk3omb7hBNEj/f/GFer4PPbThXywtWsCt\ntwa5+WZvYq553XUSl6v26/7KaFbKFtAX2BZXtMrxMSCBPsC7Kd6nAPA3hKKVKvbYA374QQMkgUDy\nYQQVjRBg8uRk2t57S375JfnAPfRQ5Sc7vpHxjDOUsvPTT5QrZwZQyMsvm8RioGmwYIGH4uJ96NWr\nfnVYuBCmToVbb5VccEEYn8/FVVfB4MFw5JHwzTd+3nvvd3Jz9+bOO5U22batZO5cB8OHW+y/P/zz\nnyrqz5FHqns+/DDsuScMGVK/sv36qzrzDOIrYnDZZWoA/+UXmYGypT4rhq+G5hONcMIEF199pSVk\n6oYbcpgzJ5/ffouVn50BCxZo9OjhYvVqjTVr4JJL4OCDlRtIcTHsvXfDl2vWLPU5aZKG0yn5978t\ncnN1XnwRRozQAQ9t2pjcdFPd
91J7tJrfocbRKKxcCTNnwr33QsuWTq677ijOPjtMx46w++7Jl3tT\n4eGHRbXtNmECfPklPPYYKS/gJM/ZUt+bk7L13HMuRo0yOPlkePddNZb89ptWrSLaVHjySTd3321w\nwAEqhPVVV2V+r2AwGb7aVrayAxWVrWg0rmzt3DLVhrIyGDPGQ/v2km+/FdUqNT4ffPhhCQsX/kmv\nXrtTUJDDwIHQvXuMPffUkFKw++6Cb75JXtOpk8nKlUnLwfTpJt27q0F9zhzo0AEOOEBSUGDx6qsS\nw9DKr4uwcGGY44+PUFjoZdCgzOTa6YTTT1f/9+oFfr9F//6SiRNFIlp0xUON41EkL788xMaNTt5+\nW2PhQli4UOeVVwRbt6rFb12Hdet0Ro0qpGtXyQ8/JDnnzVN1GD06ufj41FMFHH/8fkycmH4dKmL8\neLVAc9BBlAdhaVhcdFGYffZxc+qpOs8+C8OHWykpuH9VNDdlqy2wsWKClNIUQmwt/61OCCFaAv9G\nuRI2GfbYQ7Jmjca2bSaBQGXLQkU30GeeMTnggFL228/LbbcZdOwIt95a+V6TJ8PQodCvX5QvvnBU\nyzdxomDmzPg3N7A3Z59dgmlmptgsWgS9e6v/hw1LDjjjxiXz7L+/STCoBtfhw3N4+GEYMsSkf3+D\nV15JDiaLF8OYMXn89JOeCC+6aBH87W8wbx7stRe0b19zWR58cH+GDXNw4YXq+0svwUUXKVHeay/J\nr78KbroJAgGLzZujTJniYu5cFbJ0wQLIydHYf//a61tWpiaMubnquxDqJdgcJifBINx4o7dS2pw5\nDgzDYtky1U5KmdGYNCmZ7/33Ia6sA/zyizq24Omn1TEH27bBCSfAm28qBRlg/XpRvo8PPvsM/vY3\nwbJl+XTsKJg6FXr0UJGpBg6Em27SeOONZJkiEcFtt6n9jNu2wdKlKv3FFzU++QTeeEOl5eYKvvxS\ncPjhlS0NVZWtuEKcrROVP/+EDz8UvPuul3ffNRLK4YMPKq3k9deTy4Jt2woMw2DgwMYt02uvCS64\noKhS2rRpJgsXhikudvPIIxrz5ysX3TVrdMaO1TjooNrvGQyqwz/j9YuPb9Fo9i1ULF0K11yjFhou\nuMAg/kp8t3zZ7vrrYcqUfA48sCeffrpzyvjWW85EWa+/Xk0Iq66qp4pgMH7OlrLW2+c51Q9btgiW\nLIGjjsr8HvFJu9ut+kPTsvs9M3y4xrx5GsOHW7RqJWjVqm7lRgj1jojF4KuvivF6fRQUGPj9ysqj\naSZ9+27H4fAxYYLBTTcpReG++wIMGOCid2/1XK5bZ7JhQwmQ1/gVRVmFHnpILWYDlUK/d++uPq+8\nMsSrr6pn1OuFr74S7LtvYdVbAfDDD5XHwE8/rf70pRkz2nPyyVFOPRVuuCH9cn/yieDNN+HRR+tv\nDa8NgwfLhEdGNstsNiArlC0hxBjg1lqySKBLbbcoz1MXTx4wDViKCppRK0aNGkWLFi1UAaQkFosx\nbNgwzj23pu1hNeMf/5Dcd5/k5ps1AgHYbbfkb253sujnnScJhUx8PnjuOZXWt2+MkpIytm/3cvDB\nOp07Q1lZjEWLyrjkEh8TJggeeACmTEnec+bMHR/iwYPVALV+vTIDp4M5cwSGARs3Ql5e3Sb69u2V\n1SoWgxdeKGXVqhzGjFFluvtuZXGqiBEjdMaN0zjiCIMOHZSJ/b//FRx6qM6QIToXXKCsYsuW6Xz8\n8W58/LEy9+fnw2uvqXsceGCMKVPg9dcNLr4YWre2WLYsyJQpLo44Ah55BG64wWD33fP47bfaxSWu\nbMUtW9A8JierVsGFF6q2HTnSorBQo2NH+PrrIN26LeT773vRu7eHX3+Nu3moIcDt3vGAxrhl6957\nNd5/v5ATTpAsXAhPPKEU4jZtBNdem5/Ye6VQAPTmxRdjCbdS0GnduoCNG1W+Pn0gHJacfXYZoB
SM\nu++GG26Icc45Fh9+6OSHH+Ccc+DDD3X22y+P77/X6dIFXC5l9Tn33KSyFZ94Zrtl67TTYN68pOzH\ny7l27Y7P6imn6ICPCy6wOPlkVbcNG5TyG7cQRSKVF2oiEWXJ1ircLhxOtktJCUyfvjsHHaTk+Icf\n4IILKj+Hl1wCxx0nOfTQELNnu3jkEZWuVlkF99/v4YgjBPvuC8ccU/2LvKyMxFlOkN2WrbFjNT7/\nHD7/vPrf42PqkiVF/Pjjdrp3VxOYv/8dunZVfVjNdoA6sWKFxoYNglNOqf73b7+Fnj0h/op+6ikT\nt1tn+HA47TSdESOcXHRRepymqRY44qvzzWE8qwnRaNK1e2fANFXfDxjgY80anUWLYO5c5RKcLoJB\nJUNxOcp0US8Safzzj/74A956Sw0wl1yi9mani9xcZfnSdSgsVONiLCbx+1X6DTeoRQXLgssuUx40\ncXg8UFTUdP7Wffooro8/Vt/jCxUAZ58Np5wSIxi0uOACi2nTNB5/HE46SR2e3KOHJC9P8NlnKv8L\nL5Ry//1ezj5bsG4d/OtfMGOGyWGHlXDccfn07i248UZYujTAddd5mTvXwdy5qs3nz9d5773Uy/3Q\nQ4I+feDKK+0zsTLFpEmTmDhxIoZhIMpfdFu2bMn4flmhbAH/Af5bR55fgPVA64qJQggdtRS/obaL\nhRC5wEygGPiHlLLO9e/Ro0dzVPmSVSwWS/gkZ4IOHeDcc8O8956bwsLKboQVJ0xO546T3r59we+P\n4fPJxIDscMB++5ksXmxiGAZvvqnSP/ssxrZtZZxyiirn4MHQrl2IiRN1ysrU22naNDj//PTKv3y5\noFs3NTim+yI45ZQI0aibBQs0OneGJ5+Enj1jlJbqrFghyM+Hb78VHHaYMlusXg0tW4KalCoT4KOP\nqj9Itv+++8LJJ8P//gd33mly7bWqf+64Q/0ei8Eee1hMn25yySV6YoVo7VqdjRtjuN0q+MinnwpO\nO02wYQN0KVfpAwGBrluV+sYwstdqEscdd8AXXwiOOirK6NEi4WoxYECIhQtDDB4corBQaSexGOTn\nW2zfrtwIV6+GZ56xGD8++QL1epXLH8Cbb6oB5+GH47/WvFn0+++TQ0uLFpKNGzV8PsnYsYLevaFr\nVxO/P0xc2RJCKfH772/y4YdKkZ46Ve2did9r+XJ1v2uvhWHDIBJR/M3hUONIRFlVu3WTfP+94PTT\nLbZt0/jPf+CWW6IMG/Y1ubkH0qFDHq+8AnPmSJYsEUyYoDFhQvI+Xq/gnHOK+PzzGIcfDm+/rdyH\npYRWrXSGDVPWx59/Fuy+u4OzzzZ4911o3Rruu8/L++93Zc89A9x2W9J9qW/fKOecoyUClcSf7+OP\nlxx9tCp7XBl56y0Xb72l+mrPPSEc1unVy8ttt8EBB6g8Fa0nUNmy9Z//6HtFmwAAIABJREFUqP2T\n8VXhnY2KY9knn8TYsCFIy5Y5PPSQUmzOOiv5+2GHVdzoaHDggT5attR45BGYPl3J5EMPebj7bhXl\n7F//ikf/25H32GPzCQQElpVUWIuL1fi8YkXlhTOAv/9d0rkzXHYZzJ8vmD8/F6/X5Jxz1O/r1iWt\n8DUh3t9xRbi5KVuxGKxbJ8jJUYrukUdWHIuaDr//rvZiz5yZHON69FCfRx5Zu1dGdQgGK1sqM9mz\nFY2C12swerSL/HxBjx7JxQAtfX2oRsTHgRUrtrH33o1jXRJC/e1M19042rRRC4v/+5/67vFUnvw4\nHKr/OnVKemZs315x/5rBv/8NCxZYnHxyhPPPd1cKwHDppRK/32LdOjORvsceUR5/3M/PP6u5jpJx\nwW+/aRx4YGrlXr5ccOGFSqHN1sXHbMdZZ53FCSecUCnIyqeffkr//v0zul9WKFtSyi1AnSqjEGIe\nUCCE6FFh39YAlGVrfi3X5aEUrSAwREq5U9ZYu3c3GTdOUF
padc+W+myIoAWHHaYUs2+/jXHQQQZC\nwLZtQYLBrbz66j506aIsZukqW199JTjssMzL1aKFGrBU4Ao466wwl17qorjYYOZMGD9e8t13auW1\nKo4+2uL77zU2bVLfu3Yt5m9/y+HDD52MH6/Sjjqq5uWbY4+VLFigrIn9+km++ELQubNOQUF8f5fO\n44/7+OMPnd9+U4pxIFB5FQvUwJrte7ZWroQuXSTPPFNKKq4WCxdux+nMo2VLg5Yt4dZbLUwzwn//\nqzSYa6+FMWPg1FPDTJ264+7Xvn2jbNliJPYQ5uVJBg1azZtv7pkImvLOOyYzZ0bo29fFCSdUntBX\nxc03B7noIgctWhi0bg0jR5rcfLOJw+FgyhQ1a7AsmDPHqNGNMBtfLj/9pMr14IMWkyZFuf12g332\nUeWfPLmUhQvL6NUrRmFh3N3GZL/9BNGoht8vEu62o0apa8aOVZ8PPggffqjx3HPKFfD559WfUoRV\n/598crwUatZ//fXJwScvT/LBByXVLiI5nWqPnWUpJd7jMfn3v1Ujl5TEA+IIVq50MXWqTATGKCsT\n1Vq2li3Tue02nenTVWCQ+u4hrS8++8xILCQA9OsHpaVRfD7JMceotN13ByG289xzm3j55Y6Vrl+y\nRL0+hwxRCxW33abcEDdutHjjDeUaO3myzkkneSkqUhOln34Cv19LBOB58UVlNTz8cDf33nsUN90U\nZOzYJIfXKxk+PMR++zkwDOXqe+21kpUrBcOG6XzyiXJhO+88g4cfdnHNNTXXN94/bnfzsmxt3QrX\nXQdt2miMHVvIHXeY/Pqr2qd7773w66+5LF/uZNs2uPPOxi3Lbbd5eLbCBoRWrSz23lswf77qz1mz\nVFCme+6hxonx1q2wZo3axw1qcTU+hkHlPVu//qr6qboAXBWxapX6fOQRD5s3K5k+8URYsULnuuuc\nrF2rccUVyoW/Z0/44Yf8jJ6/hQthzz0lLVv+dcwlffsmvWc8nvRX8v7v/yAWS30/U8uWkqefnk+v\nXr0oKkq6I37zjUEwqDxDarOm+/2CtWsFXbumXVQbjYisULZShZTyRyHETOB5IcQVqNnDE8Dr8UiE\nQojdgFnAeVLKheUWrf+hNi4NQylr8VtuasogGfvsox7UrVsrr0K2b6+K0JBuNt26VXbxOe+8Xxg9\nuohvvy3k1FPB4TC4/35XnYEIVq2CgQPz+f13wV131b9cbdvChg0xdD1MTo4Ln0+5Ll10kUlxsR+H\nw8cHHxh88w306mWydGmY2293Ypoaug5Ll25n7dpF/Plnbz7+2MnZZ6u27Ns3GdSiOrRrpyY/LpfJ\nXntpbNigUVqa/P2PP9QkctIktUeurEzsMLAaRnZPTsrK1Mv00UctWrVK7WVYVCQrBT7Yc0/4z3/K\nuPRSg5UrDc47Dy66KEZhYYDnnlMhUB0OJVvPPWfSr18Any+X6dOVYn/UUX42bfqJ++4roKCggOee\no9ySFcLnq/uMCqdT+eobBjzzjFJQxo8P4PP5GD9eIzdXrRzPnu1g330rK1txeb/sMp1589TG4DPO\ngFdfdXH22cnJTVPDspJWn969Jb17l6VkIZ8zZzt5eT6Kiw2eegoeeAB+/VVVcupUVfevvoKvvqp5\n6friiy0++EDj2Wdh+vQQ06aZ/PGHl332gYsugr32qnv407T4hEGy554lSJnDHXfonH66ssB9/bUg\nGBT8+qtaBQ4GSUS8g6SyFY9K+u23SrEZOlTjoINcLFqkMWlSsv+mT3dw5JFqYjdoUOWJKCirz6BB\ndRa7VpgmXHedF12XTJumNvlXZwE4/HDYts3i3HN/ZdSolrz9dgFnnAFXXGGxYYPFL7/orF5deQHm\njTfUjV58EUBU2ounkCS6+GL1OXu2Mm2MHas+L7hAHay6114mfn+QeAjsgQNh2TKTkSOjPPKIhxde\ngBdeiPM6+dvfBJ07q8
W0J5+EYFDH51OKbdyyFQ+Q0RyUrSlTBB9/DK++CvF2u+eepEW9Y8cCwuG+\nie9xr4b6IBqFN990csEFlSe127c7ePbZysL49NOlHHBADlOnGjz5JNx4o3IT3rZNEo0qxWbwYMHW\nrQ5++EHjgw/Ue2j1aoNvvlEKeFXLVsU9W8qV2+DQQ/Po2lXjzDOVpeP112HGDAeHHab2PK1Zo/LH\nFS1QHiwguOYaNdm4/36VfsABeSxd2ptu3fwcfXR6bbNgAfTq9ddRtIBK+7szUbbqg5kzVYTDu+6S\nXHqp6sfnn0+OG9VhxQolA7aylV1oVspWOc5BHWr8MepQ4ynAdRV+dwCdgPhaUE/gkPL/y9d/Enu8\n9gJ+r4nINE1i5aOeWb7UZFbwUaourba8FSc2BQVm4vd27dQb74ADZMo86XKr8PQmgwbFGDBAY9Uq\nwT//6eWKK8KJVbPqrn3hBRIh6484IkYslhp3be2Wn6+ChFQto6Ypa9KZZ8KZZ6q0QYNCaJqe8M/f\na68oxcUxzjgjyIgRrsQEKZU2ys1V/594YowXX3Rz8cUWQ4ZIolGLkSM1Sko0Pv5YctNNFqWlErfb\nqlQXh0PHNFNv8+rSwmE1ab7mmhg5Oam3WSptPmeOIBbT6d279utT4enRw6RXLzUp/dvfzPIImiYV\nj5m44gqTQMDC6zUTK+rFxdHy8P4xCgpijByZXvvUlubxqPL066exYIFOhw6q8w2jolyqIe2ll9T1\nY8fqrFnj5b33LGbMiBEKwZIlFhs3OjjwQIuPPza55BLJs89K/v53jW7dTKRUxwYcd5y1Q5tlUvaJ\nE9Xk5JBDJF5v9bJfXd94PGpxYLfdlLLz6ac68+cLRo4MMnmym8GDJb/+qkKsT5lS2aVzwYIIu+9e\nSlGRl6ee0hECDjuslP79l/Hll4dw2WVOOnWKH9yYuqwdf3wUrzfG0KESTYP1603efjvKDTd4+egj\nixEjZMIqHK+PekYNXnpJTVTjiyKTJ2tMnqysbB6PxQcfCG65BW67LY+2bSXr18OAARarVgmOOkpw\n993www8WZ5wB551ncd99qq8yeXbuvBP++ENj9uwohx2m1VlvgKKiGDffrHjee08deHnRRfm8/77O\n3Lkxfv5ZsmBBjCef9HDCCRbTp6v73nprGb/84kIIwZ57wqOPCiIRwXXXxXjsMYOCAklxsWDw4DXE\nYq3p0cPBPfdYNZbJNE2GDw8xd66LhQuTk+uvvzY4/fSk4nfYYQaQT+fOFt9/HytfWTcSfaNpJtGo\nlva7BJSrVNwTo642z3Rc27BBcPbZleV6jz1M1qzROeggyXffqfMpK+K772L89JPkpZdyeeABi44d\nY9x7r0b//hadOql3YI8eOsceKznhBIuHH87l8cdNWrWCadMEw4dr3HsvjByZy7p1McrKLK66yuLb\nbzXOOOMoAJYti7FsmWDw4BjBYAyv1+Tmm0HTBLfeqlNQYPHZZ6pf5s2DceOSVuaKmDnTwf77mwQC\nGh6PIBZT/RLfs6XaTY1nX33l4Kuv4IsvJCtWCIqKjGrvGcd++0l+/FHwwAMxli6N8corSSVx6VJ1\nz4cfdnHEEamPa+EwLFigc8cdVq35qkurTQZSub4heDLlbtNGEHeZd7tjGfFkyn300YqnuFhy9dVq\nEvTbbxaxmFXjtcuXGwgh2Wcfs0Hmaw3dN03F05iylgmanbIlpSym5gOMkVKupsJmEinlHGrbXFIL\nIpEI/iq230A15pPq0qpL9/nUqpVlCXJygoTKN2eFwyHmzt1O27YWgYBMiycV7jhPKBRC0/xMngxL\nluj0759Ply4GH35Ywu67W9Veu3RpLu3aWTz9dCkOR6ySKbwqd0WeTNutrrSqdcnkng89BKeeGqF7\n91jCnfPrr2HsWDfjxrnZvt2P3+/E7bYq1UXT8olGRbUcf/4p8HoDjBzp5cQTI0gJxcUu
tmwx8Xhi\n/PSTTiwGv/2Wx+efG/z5ZxBdh5tuqn+bAbz3XoShQ9UKeocOpbW2W0P2TVPxVEzbf38306a5GTw4\nftJ7KX5/XH5VG5x0UgTThOnTlVnliy802rUTOJ2SbdsMwEm3bjG+/17n+uvVlQ5HPgMHRunfP8xN\nN3np3z+HU07x0aFDOGNZA7VgceSRUd56qyShaGTSZo89prFuncaRR8a49dZgpXyjRws2btS48MJc\nfv5Zp0WLAC7XjjxFRRFuuWUrOTk5tT7LNaVVTc/JUcFKHnvMw+LFUfz+MkpL3bhcZqI+yt1T7cf0\neCTBoKBv3yjz5iUjHLz8spqcKlc8FeUSYNYslT5hgs7EiYUJN95XXtF45ZWWwLEsWLCOvfdOXdZM\nE55+uoCrrgpxwAHBHdx7Uh3XhIDHHvNz660aXbqYdO0KvXoJVq3SefTRAAce6KJNG8n554eB5Ebc\nG26AefMMjjgixoABBgceGGP6dEnbtqs45BCxQ99UV6bddoOZM4uJROCGG7wcd1yEa6/NrWStj2PF\nCo0ZM0rJzZVAPk6nGtdMM4hl5VJSEkDTkhzffaez114W+fnVv4t+/FGjXz8XPXvG+OijZEEbehyY\nM0c9u8ccE+Hcc8OMH+9m6NAwBxxgsvfeJnPnOpgxQ9Cy5Rqi0fa88YaXgw9OTmlmzIAOHUxWr9Z4\n8UWDDRuSUTeXLhU88oi6/777Vt4nNXKkusc996jPV16BYFApNg8+uJW2bZWHRtxSGC/7SScJZs3y\nct55Yc47r2ZFyOez6N7dZPZsB5ddVsr27Tk4HAZ+v59QKJRQtlavLiEeGbZ79xi//aYlrBYV0bNn\njG++MXjwwQCbNmlICcccE+X5510MH676tqhI4nBAQYHFG284+P57J9OmudiwYesOkS1r6p9XXnER\nDHrp1y9Qa7440pWBVO7ZEDzpchcUGMT3ins85k55h559tvr7xz/yGD3awYoVUR56qAyfr/K14TDc\ndVcB3bubRKP+SpbrnT1fq8rRVDwNKWuRerifNTtlqynhdDoT7j5qBTiA1+tNnCJdXVpdeQ1DuQu2\nb+/B7VYC4na7OfRQb1o86XBb5TtN3W53oj59+5q88EIpI0bk8sEHedxyi9zh2vXrYe5cnUsuCTFo\nkAtdz6mVuzqe+tQn1bpkcs9jj92xPocfbnDffRo//eQjEonhdgcr8TgcGrEYifutXKmsZVdcIZg+\nXadNG8mGDYLp052JPRk14ckn1Rtuzpw2lJTkM2CAwXffuXnqKZNu3dLr79LSAHfdpeTngQdM8vO9\nKbVbU8laQ8tAv36CsjKNlSvV/du2zcXnU3n79o2ydKnB229rmCa0aiXJzZVIKVi/XlTql4pBPEAF\ncPjgAycffKAmYbNne5g9uw8jRgTp3NmJ16s2NKdT9pKSAIsXG/zrXxb5+b56tVnPnnDQQSaBQGwH\nHgjQoYOb99+XTJhg0a5dDmVlDdc3deXt1AlWr3bi8xlEIiYul5Xgie8VAvjxxwi6rlNQIJg4Mcq8\neSZSOpgwQWfIEIv33tPo2NFk1SqdqVNNTj1V55lnTK66SksoWnvvraxqq1ap78OHt2bqVLXXMpX6\nLFmi9k0dc0w0pXrX1m677ZbDHnskr2/XLsC770p0PY97743nC+/Ac+SRiuf441XaGWcUs2hRLO1x\nzefTeeUVME0npaUBdN3FU08ZbNwIM2ZEmTjR5M033cyalcupp6p6uFwmbrebvDw1BjmdXmIxdb9V\nq3QGDDA46SSLQYNM1qyJctddeqLdLrlE49tv1YT/m28MPv88nyOPjBGLBZg/P5djjlGHpjeErM2a\nBQceaDFjhgZ4OOssk0Agksh71lkwaFAxixb9QY8erSgu9vL661TC6tU6Xq9kw4bKSorLJdF1tSdu\n0yYN0xTk50s6dIAff4S99zb58UcDXU9eO2XKbI48
shs+X0G1Zff5lMXzzz/Vyvrjj5vsvTesXGlx\n9NEB3G4Ppqnj9aqjWUaPNjAML9GoA59P4PP5sCwLXS/FsgSffqra7ZRTTG6/vZTp03O5/XaN++4z\nWbdOcvrpASZNyuW++2Dbthi77WZUeOZzGDAATFO9Cx54QEu072WXFfPss2v49797MG5cAaNGWbjd\ntfdPTo6XceOcnHaaRe/e7ozf1ZmMLQ3Fkyl3p07JvA6H3Knv0DFjJAMGSKZMcdGli8711/sT11oW\njBol2L5dY8QIs842b07ztWyRNWd10Y5ShK1s1QJd1ytFjkknrab0uLLVurWe6MD68KTCXRPPKacE\nmD7d4uGHddq3V+GMVUhWle/225XLzxVXhND1vDq5G6s+qdSlIXj699do3x5efNGgrMzC7TYr5XM6\n1cZlTdM55RSj3Cc+iQ0b1OQvPqHXNEmvXuByCfbbD5Ytk3z5peDyyy3efVfjzz9h8WIH4OCXX9Q9\n7rrL4NJLBXvtpdGlS2rlXr1a44cfNN59F4YM0YnF5A55d7asNWRa/Ly3JUvUqm9+fvLMqnfe8ZOb\nq6IHGQasX6+iiHo8Km3ECJg9W7JxYzJAwfDhMHWqyeTJMbp1M/jXvyobwj//3MkLL6i0K69Mr+yr\nVmmUlgr69NEr7f+oT5vVlrdLF4P774dYbMd7NkTf1JTeqZNgxgwNw9AIBmX5s+PCMIzEMRPt2lns\ntlvy2uHDY5x+ehler4/774e2bTXWrYvhdvtxOn3k5qpzePLydMCka9dSFi3KZdgwFdxmw4Zt3HXX\nJp5/vhMdO6rQ+EVFdddn/nwwDMnBB8fQ9ZxdZlwbNiyAz+fm0ksFpgm6rrHXXqVs3uziyy81Tjwx\nvsfRQtd13O74REXniy8MbrvNyfXXq+dixgyN99/XAAfPPSfxekViX1BFnHyyznnnCYYNMzjxRDUR\nufNOGDjQwYIFLejVK7P2KS2F6dN1Ro60Um63667TmDMHxo41Oe44dWbTiy8aXHSRSLg8bt4co6jI\nIBIRlJTECIf9tG9fxAEHqCiPOTkqwNSyZaW89pqPE0/UOP54Jbs+Xyylsvt8kuLiGPn5Km3gQBVx\nzudL5jvuuBijRgmmTzcIBjXy8sAwDHRdT+zZmjVLp08fePNNdf3116v9j9dco5d7mZj06ycxDKNS\nlOBUZO2QQzZTUGBx//0a99+v9o5NmqSi71V3/U8/6axaJXjsMZHy89CQ41pD8mQypta3jOlw11af\nQw7R2LxZcP75MHOmzqWXkpCr+fPVPue2bS0uu0zsMuNafXkaQ9YyQQMGBbWRCuLnN3XosJMLUo7R\noy02b1Ybqk8/PSlImzapfS+HHPLXiTxkGCpYx8SJsH69tkM0QhUgQ7BwIQlFq2tXGDTIYvx45b8T\nP4C6b1/Jxo3b+Pxzk88+UxEgn3/e5JFHAjz2mMXatWoFddYsP//5z8IEx7Rp8QlMHXGcK2DJEtVv\nffrUo/LNCD4f7LuvyeLFRTidcofw/FWPUnA6VfRPr1dtLF+zxuQ//wkwfLjFRRcpt7LBgyXjxgW4\n5RbJV1+pQCP9+kXxeqOsWJF8LlasSK+s33yjBumDD65PjbMf++6r9o9FoztGI/R44MsvY0yeXFLt\ntbqu3LJAhaiHZFCMvHJvrBEjJPvvb3LllZIWLdQ1Hg/84x+ruf9+ZTo78EB4+eW6o4XOnq2shHVF\neGuuMIxkhFtQe4GXL0/ulasYIAOUrA8Z4mPZMpEYvypG9Ny6dUdF68MPTbp1U/+/8orGoEHJVeu7\n74bDD/cxatTBjB/v4qGH0q/D5MmCsjLBGWek/u7p00dFlz39dHWNx6OiGPp8cPvtFjffHCQ/X7l/\nulzqgHSPB2bNijFjRlIecnKgc2eLRx6xGDQI3nsPPv00xVBy5fB6a/+9e3fo0yfK+PE7RidWboSC\nzz9XAVricLth
5MjMD7SuCCHg1VcDiUPTlyyBJ5+seTo4f75ACDjiiPpzNzcYhlpkGzOmrO7MTYCc\nHHXG4ddfC/bYo4gvvlDpK1eqz/nzixv1IGMbmcFWtpoYkyaZ3Hdf+ocKNxb22Ued1QPqpTtqVA5X\nXaVx4YUqbcKEzDcENkeccYaK6rdwoVFtNMJYDD76SI1k776rwl+//77FKadEmD8/9v/t3XmUVOWd\n//H3t7uhoVuabmVxYcQfKIhCAMHgEgUlrvm5MDpKFCdqnIzocYjEmCFxSzwqxzWMUfGo48Q9BrOY\nyRgV14gKQRF1RNG4gP5UBBoaSraq+v7+eG51VzfdTRd01e3u+rzO6VNdT91b3+dWPX37PvfZuPba\nMEPc00+n6tcLyRgyBP75nzdRUhLShw4NE1GMGFHLnDnr6hc/BFi+vO1/msuWldKrl9dfqBaDoUNT\nuBuVlbndCDALs86dc84m7rwzdD1oOgvduHHhwv1Pf1rP/fe/xJFHbuGUU6BvX9h3XzjmmJI2T+P7\nwgvdGDXKqa7OKZudztCh4W/jgw/CWJamNyoOPDCsrdbeSkvhBz/YRFlZaNk677wwVX5LliwJa2Id\nfXRx3ECCcEMokaB+kfHsdbYALriglEGDQtrq1eEcmHHYYWHQx8KFoQX43XeTLFiwhokTnQULwsyS\npaVbf5bptNGv3wYuvbSCSy8N27XVe++FGUWHD0+yzz65HWtLF5lXXJFmxowNzb52+OGtr411wgm0\neXbXtjKDI4/cwuuvG+vWNZ6dOPN5Ll8eZu3Ml0MOSfLYYw3Pn33WmD+/rH6JlWxLloTJXba1lltX\nddttYYHljuLUU+GMM9KYOf/+76VceCE89hj07+9F+x11dOpGWGCnnNKwMHFHMWkSLFoU7r7Pnt0w\na9GMGWHwcFsvLLuCQYPCP0J36N17M9BwGzFT2XrnHWPChLC+DlC/MOkBB4QL93Hjwnab2nhuDv94\nk1RVhW5Qq1fD6tUlXHQR/OhHsPfere//6acl7LlnyxcaXVG/fqGiFAb9589OOyWZM2c9NTU1/P73\n8I//CM8+W8Jrr5Vtc/FSd3jxxW6cdZYTJkDtukaODN/D66+HRY2zW7YKYc6ccLPoyiuNadMqKS8v\n4dFHt97u5z8PrWg//GEHWDG1QA45JIxPyrTGZ6blz1S23nnHuOGGjYwZU878+WVMmwYnnxxmHx07\ndh1lZVX071/GPfeE81pmMpoePcJivk8+mWbx4o1ccEE5K1eW8U//BBUVWzj00KVcfXVYvXrevJbX\nnWrqySfD4x/+sI6wRGbXNHp0itpa4623Gi/ynV15zSwZkS+VlfDZZ/DXv8Lkycbxx1fx7W+n6xfx\nzVi0KP95kbbr0wd+/es0J5+8gdNO68Wrr4b0MWPizZe0rINd9ktcRo2CFStSfP75er7xjXAbPtOl\npJiUl4f1mJYtg6qqxovQhMqWsWyZMWRI+8cuLQ13MzdtStK/fymzZ5cwb17o4tGa5ctLGDCg61/Q\nZ8vcad5Wd532NGlSaLWpqfFGXQtbMm8efPllCccdl6SrdyLYeefQSr5wYaayVdjKzEknhZ8HHnAe\nfzz0I226WCzA/PmhwlxVVTw3kaqqQqv63/4WJobItOR2a5gMkhEjkhx2WDlHHBGen3FGWFetro5G\n6/A1Z/x4Z/ToTZSVlTNgALz8MqxcuZ7nnltD9+7O5s3Gm2+2fR3JRYvCWk41NV279XHkyNBXs2nL\nVklJw3Hvtlv+87H77nDUUQ3P584ticb8hee1tcYLL1ijxbalYzjyyC2cemqahQtL6N4dpk8vnptI\nnU2nuwIwsxoze9DM1ppZrZndbWZtvuQysyfMLG1mJ+Yzn51RdTXssUeaRYuS/O//Uj+ouNhkxtOF\nlq0G3bqFO7uffgp77pmf2BUVYZzKI4+E8S2ZcTAteecd+Mtfuuel8teRZVq2Ki
oKe0HWo0dYIHbW\nrJ7cckvrlds77yxh8OBUo3EXXdnYsaGylUhs3Y2wUCZMaCgPn37a+LU334SPPy7OcSeDB4eW1p49\nGz6f7MrWgAHtd5FmFm5M1dRs5osv1nDxxXD33VBZWcbSpdu+5FiyBIYN69oVLYA+fZzq6nCczXUj\nbJqeTzvvDLvu2hB3/vyG1846ayfSaWvUvVQ6BjN4+OE0H30Uut+edlrX/7vprDpdZQt4CBgGTAS+\nAxwO3NmWHc3sYiBFWNBYWjB8eHGvPp65CKmu3tIk3dm8GT7/PP93HCdMSPLqq0nWrw8zRD3zTPMX\n9pMnh9uPXf0ucFO77x6ON3sgf6Gce26aFStKuPTSUj75pPlt3GHuXGPSpM1bjQnrqr7xjVD5j6Mb\nYcZ116W5+eYwE0RmjFLG008blZVw/PExZCxmgweHx6qq5itb/frl7/xx2WUNv//2t+Utb0ioqC9a\nBKNHF8f5bI89wmP2jc1MZavQ47qXLEkxb95a9tvPOfRQuPrq0Dr/yivdOPPMdH1eRSR3neoywMz2\nBY4Bvu/uC939ZeAiYLKZ7bqNfUcCPwTOpZj6W0nOJk8Oj/vtt6ZRekVF6BaWShl9++Y/H2PGhDuO\ntbVw7LGlW1UsvvoqzGjYt2+af/3X4rg4yRg4ML6JW047zbnoojDUMfgsAAAVLElEQVTY/uqrw/Tx\n3uTjX7YMVq40DjgghtpgTAYPDuMNt2yx2Fq2eveGKVM2sf/+zrRpcMgh4e8nnYaFC43RoxtXMorF\noEHhMXtm2ezPYQdmNN6mnXeGuXNDF8bnn2/5w3/77TBt/ObNxTOBSWZh5OzxbJnKViH+x2TbaSfY\nd98U990X/navuAK++93MkhfqniayIzrbmK2DgVp3X5SVNpfQUjUO+GNzO5lZT0KL2IXuvsLaOJNA\nKpUiGV3hhoVDGx5bSstl2+y0XONsb+z2iNOW2PmK01yM9o5zzjlw4olrePPNLY3i9OxpvPVWuD9R\nU5OqX8+qvY8nO+3uu41TTinB3XjjjVIOPDBVPx32vHmGeylPP72WXXbpWV8ZK4ayNmBAaHXcb79k\nm497R9Kyy1pZWYqrrtrAmjVh/a177oG9904zfHjDPvPnG1DKyJHJdjsP5ON42vP7Dl1rw7+U8vJ0\nbGWttBROOCHFzJllLF+eqRDXkEgY06enSSbTRXde22uvUB4zXZJTqRRmSbIvAfJZ1saPh1mznPPP\n78ZVVyW5+OIkvXuH191h1aoUI0aEvEycmGbvvVMkEvkt0x3hvHbDDUmuuKKMUaNSJJMhPTNmq08f\nJ5lMFbysDR+e4vbbjZ/+tITPPzcuv/xrxowp3aHzbGc+r7VHnI5Q1rrieS3XY2mvONujs1W2dgVW\nZCe4e8rMVkevteQW4CV3/+9cgm3evJm6JqOoE5nFSraR1pZtN27cWP+4vXHaEjtfcZqmFSJOazHa\nM86WLVvH6d69kk8/DYPve/RYT11dqsX92+N4EokE48eH2aIGDqzhmGN6U12d5tln6xg4MM0DD1TS\nr58xYEC66Mqa2UZmzVrAccftQV1dz1a33d60bZW1GTPquP/+sLDy44+neOihCi6++Gv69nVefrkn\nu+1Wzq67ert/N/k4nvY4r1VXGxA+jx49UrGWtSOOSDBzZm969nRuvbUsStvC6acn6mfTa2vsrnBe\nGzw4fDe1tQ3xSkvXA9Wt7ru9sZs7nm9+swSo5pprykgmv+bSS8M2l11WwezZoXvh1KkbueSSDSQS\nvt1xdiTfhY4zYUJY8sM9TNiycePG+nXQIEld3bpW989XWTv9dJgwwfjb38o49tgtNPcRFct5rT3j\ndKT/oV3hvNaWOO1Z1ja3dZafZnSIypaZXQe0NvedE8ZptfgWtDAOK5oI40hgVK756t69O1XRVEyp\nVIpEIkFlZWX9KtLNpeWybTqdmUK3R85xco
nd3nFail2IOM3FKFScqiojnQ6tonvuWVk/S1d7x2lu\nu5NOSjNnTilr1pQwc2YVe+8Njz1WwrXXJjGjKMvasGFrqakZFFtZGzSogvfeSzJ9egk33ZSp8HVj\nv/1g1qxSxo0L+7fXd1Oosra9sQcNati2pmZTrGVt/PgefPBBkpIS2GefUqZO3ciNN5ZQWrrTdpW1\n7f3cOsp5bejQUq6/PsWQIevr4wwb1nj2hXyXtWHDGm5O3XNPTw46qJzjjkvWV7QAZs4so6KiV1H/\nD91//7UATJxYQlVVVWxlraoKBg1KkUhsyev5Jh/Ho+u14jivFbqsde/ene3VISpbwI3AvdvY5kPg\nC6DR0q1mVkq4nfplC/sdAQwC1jbpPvg7M3vR3Y9sKeAVV1zBLtEoVXcnmUxy5plnMmXKlEbblZaW\nUtbM4lnNpWenZb7AbW2Xa1qh4jRNK0Sc1mLkO05lZcMd8f79y7ZaL629jyc77a67kowaleCzz3py\nxx0NQy1PO6194+SaVqg4HbWsDRlSxlFHNaxhdOedDSf1KVN8h+O0Zzlvj8+srdvussum2Mva4MEh\nbc2aJBs3bqC0tKpTl7UdjfPjH0NtrbNwYUjr0SNs17t3y+W0vY/n3nvXUV5ewXnnlTJpUikHHNBw\nLrv/fqiqKnyZ7mjnte7d0yxfXsvuu9c0mlynM5W1bcXpjOe1rljW8h2nM5e1Rx55hAcffJCysjIy\ndYdVq1Zt9T5ttXUOYuDuq4BtHoWZvQJUm9norHFbEwktW/Nb2O064K4maW8D04BWuxVec801TJgw\nAYBkMkldXV2j2rkUl8rKcFHSq5dTXl7YOVZ22gmmTt3EV1+Vc8cdJfTqFSbH6NeveNYL6qhOPRVm\nznS+9a3NzJkT7tKnUpBOe9F+N9XV29/dor316NH2NZ6KzSuvwC67bP84hFydeOIWqqqcU08Nazu+\n/no4j371VZI+fTrE5UiHUFlJ0cxiKtIRTZ48meOPP56qqoabdM8//zxHZBYjzFGnOru5+7tm9iRw\nl5lNBboDtwIPu/sXAGa2O/AMcFY0Y+EKmozzimqpy929hYmbRbZWUREe+/SJLw+DB8OCBTBiRLiI\njGPqc2lsjz1g+fIUq1cn6N27G9//fgklJWEGvGLzwgvwwAMbKS3d9rYSv4MOCueQQt8U6NYNFi+G\nyy9Pk0hsorq6CKeIFJGi0akqW5EzgF8RZiFMA3MIrVQZ3YAhQEUr71Ec88pKu8pUtgq9/klTBx4Y\nb3xpXlkZzJ6dpqyseG9JH344jBixgYUL486JdHRlZXD11Wnq6jYQ/m2LiHRNna6y5e5rgCmtvP4J\n0Op9VXfXfVfJ2c47h8fy1tflFBEREREBOtmixiJxGjUqNIiWl6thVERERES2TZUtkTbaZx84++yN\nzJpVhINxRERERCRnna4boUhcSkrgppu+1oyUIiIiItImqmy1IpFIUFtbC4QFzjZu3Eg6na6f07+5\ntFy2XbduXaPHXPbNJXZ7x2kpdiHiNBejUHF29Ptu7++mUHFU1lTWChVHZU1lrVBxVNZU1goVR2Wt\na5S1RCLB9jJ3jT9pyswOAF677bbb2GeffeLOjoiIiIiIxOT999/nwgsvBBjj7q/nsq9atloxcOBA\nxo4dCzTUcnv06LFVzTc7LZdt161bx3vvvcfQoUPp1atXTvvmEru947QUuxBxmotRqDg7+n2393ej\nsqayprKmsqayprKmsqayprKW/7K2efNmtpcqW62orKykpqYGgGQySV1dXaPVpJtLy3VbgF69euUc\nJ5fY7R2ntRiFipMdo1BxdvT7bu/vplBxVNZU1goVR2VNZa1QcVTWVNYKFUdlrWuUtcrKyq0+v7bq\nVLMRmlmNmT1oZmvNrNbM7jazbR69mR1sZs+Y2fpo3+fNTKslSauee+65uLMgMVMZEFA5EJUBURmQ\n7depKl
vAQ8AwYCLwHeBw4M7WdjCzg4EngL8AY6OfXwGav1tapROrqAwIqByIyoCoDMj26zTdCM1s\nX+AYwsC0RVHaRcCfzewSd/+ihV1vBn7p7jdkpb2f39yKiIiIiEix60wtWwcDtZmKVmQu4MC45nYw\ns77RayvNbJ6ZfRF1ITw0/9kVEREREZFi1pkqW7sCK7IT3D0FrI5ea86g6PFKQnfDY4DXgWfMbHCe\n8ikiIiIiIhJ/N0Izuw74SSubOGGcVotvEW3TnExlcra73xf9Pt3MJgLnAj9rYb8eAEuXLqWqqgoI\nM5MkEgkqKysbzVbSNC2Xbevq6nj//ffp1q1bznFyid3ecVqKXYg4zcXIV5xEIsHixYtbPZZ8HI/K\nWscpa03LQL7itOVYVNbiK2ttORd0lvOaytr2lbV8l4F8HI/KWvuWtfYsA/k4HpW1/Ja1pUuXZt6m\nBzmKfVFjM9sF2GUbm30InAXc6O7125pZKbARONXd/9jMe+8V7TvF3R/KSn8E2OLuZ7WQpzOAB3M7\nEhERERER6cLOzK5TtEXsLVvuvgpYta3tzOwVoNrMRmeN25pIaNma38J7f2xm/w8Y2uSlIcD/tBLu\nSeBM4GNCZU5ERERERIpTD2AvQh0hJ7G3bOXCzP4H6AdMBboD/wksyLRQmdnuwDPAWe6+MEqbBlwF\nnAe8AZwNTAeGu/tHBT4EEREREREpErG3bOXoDMIaWXMJ62TNAaZlvd6N0GpVkUlw91nRAsY3AzsD\ni4Fvq6IlIiIiIiL51KlatkRERERERDqLzjT1u4iIiIiISKehypaIiIiIiEgeqLLVDDO70Mw+MrMN\nZvaqmR0Yd56kMMzsMDN73Mw+M7O0mZ0Yd56ksMxshpktMLM6M/vSzH5vZkPizpcUjpmdb2aLzWxt\n9POymR0bd74kPtF5IW1mN8edFykcM7sy+t6zf96JO19SWGa2u5ndb2Yrzezr6P/DAW3dX5WtJszs\ndOAm4EpgNGFCjSfNrE+sGZNCqSTMWnkhLS+WLV3bYcCtwDjg24SJd54ys56x5koKaTnwE2BM9PMs\n8EczGxZrriQW0Q3XfyFcD0jxeRvoD+wa/Xwr3uxIIZlZNTAP2AQcAwwDfgTUtvk9NEFGY2b2KjDf\n3adFz43wj/c/3P36WDMnBWVmaeBkd3887rxIfKIbLSuAw939pbjzI/Ews1XAJe5+b9x5kcIxs52A\n1whLzlwOLHL36fHmSgrFzK4ETnL3NrdiSNdiZjOBg919/Pa+h1q2sphZN8JdzGcyaR5qo3OBg+PK\nl4jEqprQyrk67oxI4ZlZiZlNJiwp8krc+ZGCuw34k7s/G3dGJDb7REML/m5mD5jZP8SdISmoE4CF\nZvZoNLTgdTM7L5c3UGWrsT5AKfBlk/QvCU3HIlJEopbtXwIvubv66RcRMxtuZusIXUduBya5+7sx\nZ0sKKKpkjwJmxJ0Xic2rwNmE7mPnA/8HeNHMKuPMlBTUIELL9nvA0cBs4D/MbEpb36CzLWocF0Pj\nd0SK0e3AfsChcWdECu5dYCShZfMU4D4zO1wVruJgZgMIN1qOcvctcedH4uHuT2Y9fdvMFgCfAKcB\n6lJcHEqABe5+efR8sZntT6iAPdDWN5AGK4EUYSBktn5s3dolIl2Ymf0KOB6Y4O6fx50fKSx3T7r7\nh+7+urv/jDA5wrS48yUFMwboC7xmZlvMbAswHphmZpujVm8pMu6+FlgK7B13XqRgPgeWNElbAuzZ\n1jdQZStLdPfqNWBiJi06oU4EXo4rXyJSWFFF6yTgCHdfFnd+pEMoAcrjzoQUzFxgBKEb4cjoZyHh\nTvZI1+xiRSmaMGUw4QJcisM8YGiTtKGEFs42UTfCrd0M/NrMXgMWABcTBkb/V5yZksKI+mHvTeg6\nCjDIzEYCq919eXw5k0Ixs9uB7wInAgkzy7R0r3X3jfHlTArFzK4BniDM
RNsLOJPQqnF0nPmSwnH3\nBNBonKaZJYBV7t70Lrd0UWZ2A/AnwoX1HsDPgSTwcJz5koK6BZhnZjOARwnLwpxHWA6iTVTZasLd\nH42mev4FoTvhG8Ax7v5VvDmTAhkLPEcYo+eENdcAfg2cG1empKDOJ3z3zzdJPwe4r+C5kTj0J3zX\nuwFrgTeBozUjXdFTa1bxGQA8BOwCfAW8BBzk7qtizZUUjLsvNLNJwEzC8g8fAdPc/ZG2vofW2RIR\nEREREckDjdkSERERERHJA1W2RERERERE8kCVLRERERERkTxQZUtERERERCQPVNkSERERERHJA1W2\nRERERERE8kCVLRERERERkTxQZUtERERERCQPVNkSERERERHJA1W2RESkQzCz8WaWMrOquPPSGUSf\nV1qfl4hIx6XKloiIxMLMnjOzm7OS5gG7uXtdgeJPMLNlhYiVRx53BkREpGVlcWdAREQEwN2TwIoC\nhjwBeLyA8UREpMioZUtERArOzO4FxgPToq5wKTP7Xna3uOh5rZl9x8zeNbOEmT1qZj2j1z4ys9Vm\nNsvMLOu9u5vZjWb2qZmtN7NXzGx8M9k4kaiyZWanmtmbZva1ma00s6fMrGfWe55nZu+Y2YbocWqT\n49nDzB42s1VRzAVmdmDW61PN7AMz22RmS8xsSpP902b2fTP7XXScS83shCbbHG9m70V5fAbYa3s/\nfxERKQy1bImISBymAUOAt4DLAQOGs3W3uArgIuA0oAr4ffRTCxwHDAJ+B7wE/Dba5zZg32ifz4FJ\nwBNmNsLd/w5gZvsDfYHnzGxX4CHgEuAPQC/gsChPmNmZwFXAhcAbwGjgLjNb7+73m1kl8CKwHPi/\nwJfAAUQ3NM1sEvBL4N+AZwgtavea2XJ3fyHrWK8Afhzl49+AB81sT3dfY2YDgMeAW4G7gLFAdhdM\nERHpgMxd3b1FRKTwzOw5YJG7T4+ejweeBWrcvc7Mvgf8JzDY3T+OtrkDmAL0c/cNUdoTwEfufoGZ\n7Qn8HfgHd/8iK9bTwHx3vyx6PgMY5e6nm9loYCGwl7svbyaf7wOXuftvstJ+Bhzn7t8ysx8A1wMD\n3X1tM/u/BLzl7lOz0n4DVLj7CdHzNPALd78qel4BrItiPGVm1wInuPuIrPe4Drg083m17VMXEZFC\nUsuWiIh0ZF9nKlqRL4GPMxWtrLR+0e/DgVJgaXbXQqA7sDLr+UmEViKAxYQWp7fN7EngKWBO1KJU\nAQwG7jGzu7P2LyO0rgGMJFQat6poRYYBdzZJm0dovcr2VuYXd//azNZlHde+wPwm27/SQjwREekg\nVNkSEZGObEuT595CWmYM8k5AktCNL91ku/UAZtYfGAX8GcDd08DRZnYwcDSh2+I1ZvZNIFOpOw9Y\n0OT9UtHjBrataTcSayatteNqbnsREengNEGGiIjEZTOhFao9LYres7+7f9jkJzPT4YnAy+6+JntH\nd3/F3X9OGJO1GZgU7fMZoStj0/f7JNr1TWCUmVW3kKclwLeapB0SpbfVO8C4JmkH57C/iIjEQC1b\nIiISl4+BcWY2kNDqVEI0KcX2cvf3zewh4D4zu4RQ+eoHHAksdvcnyJqFECBqwZpI6D64AjgI6EOo\n4ECYHGOWmdUBfwHKCRNU1Lj7LcDDwE+BP5jZTwmTcowGPnP3+cANwG/MbBGhu+KJhEk7JuZwaLOB\n6WZ2PXB3FP97OewvIiIxUMuWiIjE5UZCV7x3CJWcPWmfrnJnA/dF7/8uYfbCscCyaAzWRBqvr1UH\nHE7oVvge8Atgurs/BeDu9xC6EZ5DaMV6nlDR+TB6fQtwVHQMf462+Ul0bLj7HwmzL14CvA38C3C2\nu/81Kw/NHXd9WjRxxymEsWZvAD8AZuT2sYiISKFpNkIRESka0TTsV7v78LjzIiIiXZ9atkREpJis\nI7Q6iYiI5J1atkRERERERPJALVsi
IiIiIiJ5oMqWiIiIiIhIHqiyJSIiIiIikgeqbImIiIiIiOSB\nKlsiIiIiIiJ5oMqWiIiIiIhIHqiyJSIiIiIikgeqbImIiIiIiOSBKlsiIiIiIiJ58P8B5c2DVROJ\nnLAAAAAASUVORK5CYII=\n",
- "text/plain": [
- ""
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "import wfdb\n",
- "rec = wfdb.rdsamp('sampledata/100', sampto=2000)\n",
- "wfdb.plotrec(rec,timeunits='seconds', ecggrids='all', figsize = (10, 4))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": true
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.5.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..cbb0a435
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ = wfdb
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/wfdb/readwrite/__init__.py b/docs/_static/.gitkeep
similarity index 100%
rename from wfdb/readwrite/__init__.py
rename to docs/_static/.gitkeep
diff --git a/docs/_templates/.gitkeep b/docs/_templates/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..89ba3ebe
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# wfdb documentation build configuration file, created by
+# sphinx-quickstart on Tue Jan 2 13:19:02 2018.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'wfdb'
+copyright = '2018, MIT Lab for Computational Physiology'
+author = 'Chen Xie, Julien Dubiel'
+
+import wfdb
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = wfdb.__version__
+# The full version, including alpha/beta/rc tags.
+release = wfdb.__version__
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# This is required for the alabaster theme
+# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+html_sidebars = {
+ '**': [
+ 'relations.html', # needs 'show_related': True theme option to display
+ 'searchbox.html',
+ ]
+}
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'wfdbdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'wfdb.tex', 'wfdb Documentation',
+ 'Chen Xie, Julien Dubiel', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'wfdb', 'wfdb Documentation',
+ [author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'wfdb', 'wfdb Documentation',
+ author, 'wfdb', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+
+
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..8b9027f0
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,79 @@
+wfdb
+====
+
+Introduction
+------------
+
+The native Python waveform-database (WFDB) package. A library of tools
+for reading, writing, and processing WFDB signals and annotations.
+
+Core components of this package are based on the original WFDB
+specifications. This package does not contain the exact same
+functionality as the original WFDB package. It aims to implement as many
+of its core features as possible, with user-friendly APIs. Additional
+useful physiological signal-processing tools are added over time.
+
+
+Development
+-----------
+
+The development repository is hosted at: https://github.com/MIT-LCP/wfdb-python
+
+The package is to be expanded with physiological signal-processing tools, and general improvements. Development targets Python 2.7 and 3.5+ only.
+
+
+API Reference
+--------------
+
+The exact API of all accessible functions and classes, as given by the docstrings, grouped by subpackage:
+
+.. toctree::
+ :maxdepth: 2
+
+ io
+ plot
+ processing
+
+
+Core Components
+---------------
+
+A subset of the above components are accessible by directly importing the base package.
+
+.. toctree::
+ :maxdepth: 2
+
+ wfdb
+
+
+Other Content
+-------------
+.. toctree::
+ :maxdepth: 2
+
+ installation
+ wfdb-specifications
+
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+
+Authors
+-------
+
+`Chen Xie`_
+
+`Julien Dubiel`_
+
+
+.. _PEP8: https://www.python.org/dev/peps/pep-0008/
+.. _docstrings: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+.. _existing tests: https://github.com/MIT-LCP/wfdb-python/tree/master/tests
+
+.. _Chen Xie: https://github.com/cx1111/
+.. _Julien Dubiel: https://github.com/Dubrzr/
diff --git a/docs/installation.rst b/docs/installation.rst
new file mode 100644
index 00000000..c9f28c7d
--- /dev/null
+++ b/docs/installation.rst
@@ -0,0 +1,10 @@
+Installation
+------------
+
+The distribution is hosted on PyPI at: https://pypi.python.org/pypi/wfdb/. To directly install the package from PyPI without needing to explicitly download content, run from your terminal::
+
+ $ pip install wfdb
+
+The development version is hosted at: https://github.com/MIT-LCP/wfdb-python. This repository also contains demo scripts and example data. To install the development version, clone or download the repository, navigate to the base directory, and run::
+
+ $ pip install .
\ No newline at end of file
diff --git a/docs/io.rst b/docs/io.rst
new file mode 100644
index 00000000..7cc70e0e
--- /dev/null
+++ b/docs/io.rst
@@ -0,0 +1,35 @@
+io
+===
+
+The input/output subpackage contains classes used to represent WFDB
+objects, and functions to read, write, and download WFDB files.
+
+
+WFDB Records
+---------------
+
+.. automodule:: wfdb.io
+ :members: rdrecord, rdsamp, wrsamp
+
+.. autoclass:: wfdb.io.Record
+ :members: wrsamp, adc, dac
+
+.. autoclass:: wfdb.io.MultiRecord
+ :members: multi_to_single
+
+
+WFDB Annotations
+----------------
+
+.. automodule:: wfdb.io
+ :members: rdann, wrann, show_ann_labels, show_ann_classes
+
+.. autoclass:: wfdb.io.Annotation
+ :members: wrann
+
+
+Downloading
+-----------
+
+.. automodule:: wfdb.io
+ :members: get_dbs, get_record_list, dl_database, dl_files
diff --git a/docs/plot.rst b/docs/plot.rst
new file mode 100644
index 00000000..ef947d3a
--- /dev/null
+++ b/docs/plot.rst
@@ -0,0 +1,5 @@
+plot
+----
+
+.. automodule:: wfdb.plot
+ :members: plot_items, plot_wfdb, plot_all_records
diff --git a/docs/processing.rst b/docs/processing.rst
new file mode 100644
index 00000000..a93cb02e
--- /dev/null
+++ b/docs/processing.rst
@@ -0,0 +1,41 @@
+processing
+==========
+
+The processing subpackage contains signal-processing tools.
+
+
+Basic Utility
+-------------
+
+Basic signal processing functions
+
+.. automodule:: wfdb.processing
+ :members: resample_ann, resample_sig, resample_singlechan,
+ resample_multichan, normalize_bound, get_filter_gain
+
+Heart Rate
+----------
+
+.. automodule:: wfdb.processing
+ :members: compute_hr
+
+
+Peaks
+-----
+
+.. automodule:: wfdb.processing
+ :members: find_peaks, find_local_peaks, correct_peaks
+
+
+QRS Detectors
+-------------
+
+.. automodule:: wfdb.processing
+ :members: XQRS, xqrs_detect, gqrs_detect
+
+
+Annotation Evaluators
+---------------------
+
+.. automodule:: wfdb.processing
+ :members: Comparitor, compare_annotations
diff --git a/docs/wfdb-specifications.rst b/docs/wfdb-specifications.rst
new file mode 100644
index 00000000..e8ceef4e
--- /dev/null
+++ b/docs/wfdb-specifications.rst
@@ -0,0 +1,16 @@
+WFDB Specifications
+-------------------
+
+The wfdb-python package is built according to the specifications of the original WFDB package.
+
+* `WFDB Software Package`_
+* `WFDB Applications Guide`_
+* `WFDB Header Specifications`_
+* `WFDB Signal Specifications`_
+* `WFDB Annotation Specifications`_
+
+.. _WFDB Software Package: http://physionet.org/physiotools/wfdb.shtml
+.. _WFDB Applications Guide: http://physionet.org/physiotools/wag/
+.. _WFDB Header Specifications: https://physionet.org/physiotools/wag/header-5.htm
+.. _WFDB Signal Specifications: https://physionet.org/physiotools/wag/signal-5.htm
+.. _WFDB Annotation Specifications: https://physionet.org/physiotools/wag/annot-5.htm
diff --git a/docs/wfdb.rst b/docs/wfdb.rst
new file mode 100644
index 00000000..542d9aea
--- /dev/null
+++ b/docs/wfdb.rst
@@ -0,0 +1,42 @@
+wfdb
+====
+
+These core components are accessible by importing the `wfdb` package
+directly, as well as from their respective subpackages.
+
+
+WFDB Records
+---------------
+
+.. automodule:: wfdb
+ :members: rdrecord, rdsamp, wrsamp
+
+.. autoclass:: wfdb.Record
+ :members: wrsamp, adc, dac
+
+.. autoclass:: wfdb.MultiRecord
+ :members: multi_to_single
+
+
+WFDB Annotations
+----------------
+
+.. automodule:: wfdb
+ :members: rdann, wrann, show_ann_labels, show_ann_classes
+
+.. autoclass:: wfdb.Annotation
+ :members: wrann
+
+
+Downloading
+-----------
+
+.. automodule:: wfdb
+ :members: get_dbs, get_record_list, dl_database
+
+
+Plotting
+--------
+
+.. automodule:: wfdb
+ :members: plot_items, plot_wfdb, plot_all_records
diff --git a/sampledata/03700181.dat b/sample-data/03700181.dat
similarity index 100%
rename from sampledata/03700181.dat
rename to sample-data/03700181.dat
diff --git a/sampledata/03700181.hea b/sample-data/03700181.hea
similarity index 100%
rename from sampledata/03700181.hea
rename to sample-data/03700181.hea
diff --git a/sampledata/100.atr b/sample-data/100.atr
similarity index 100%
rename from sampledata/100.atr
rename to sample-data/100.atr
diff --git a/sampledata/100.dat b/sample-data/100.dat
similarity index 100%
rename from sampledata/100.dat
rename to sample-data/100.dat
diff --git a/sampledata/100.hea b/sample-data/100.hea
similarity index 100%
rename from sampledata/100.hea
rename to sample-data/100.hea
diff --git a/sample-data/100.qrs b/sample-data/100.qrs
new file mode 100644
index 00000000..51538cb8
Binary files /dev/null and b/sample-data/100.qrs differ
diff --git a/sampledata/1003.atr b/sample-data/1003.atr
similarity index 100%
rename from sampledata/1003.atr
rename to sample-data/1003.atr
diff --git a/sampledata/1003.hea b/sample-data/1003.hea
similarity index 100%
rename from sampledata/1003.hea
rename to sample-data/1003.hea
diff --git a/sampledata/100_3chan.dat b/sample-data/100_3chan.dat
similarity index 100%
rename from sampledata/100_3chan.dat
rename to sample-data/100_3chan.dat
diff --git a/sampledata/100_3chan.hea b/sample-data/100_3chan.hea
similarity index 100%
rename from sampledata/100_3chan.hea
rename to sample-data/100_3chan.hea
diff --git a/sampledata/100skew.hea b/sample-data/100skew.hea
similarity index 100%
rename from sampledata/100skew.hea
rename to sample-data/100skew.hea
diff --git a/sampledata/12726.anI b/sample-data/12726.anI
similarity index 100%
rename from sampledata/12726.anI
rename to sample-data/12726.anI
diff --git a/sampledata/12726.dat b/sample-data/12726.dat
similarity index 100%
rename from sampledata/12726.dat
rename to sample-data/12726.dat
diff --git a/sampledata/12726.hea b/sample-data/12726.hea
similarity index 100%
rename from sampledata/12726.hea
rename to sample-data/12726.hea
diff --git a/sampledata/12726.wabp b/sample-data/12726.wabp
similarity index 100%
rename from sampledata/12726.wabp
rename to sample-data/12726.wabp
diff --git a/sampledata/12726.wqrs b/sample-data/12726.wqrs
similarity index 100%
rename from sampledata/12726.wqrs
rename to sample-data/12726.wqrs
diff --git a/sampledata/3000003_0003.dat b/sample-data/3000003_0003.dat
similarity index 100%
rename from sampledata/3000003_0003.dat
rename to sample-data/3000003_0003.dat
diff --git a/sampledata/3000003_0003.hea b/sample-data/3000003_0003.hea
similarity index 100%
rename from sampledata/3000003_0003.hea
rename to sample-data/3000003_0003.hea
diff --git a/sampledata/310derive.dat b/sample-data/310derive.dat
similarity index 100%
rename from sampledata/310derive.dat
rename to sample-data/310derive.dat
diff --git a/sampledata/310derive.hea b/sample-data/310derive.hea
similarity index 100%
rename from sampledata/310derive.hea
rename to sample-data/310derive.hea
diff --git a/sampledata/310derive_2.dat b/sample-data/310derive_2.dat
similarity index 100%
rename from sampledata/310derive_2.dat
rename to sample-data/310derive_2.dat
diff --git a/sampledata/310derive_2.hea b/sample-data/310derive_2.hea
similarity index 100%
rename from sampledata/310derive_2.hea
rename to sample-data/310derive_2.hea
diff --git a/sampledata/310derive_3.dat b/sample-data/310derive_3.dat
similarity index 100%
rename from sampledata/310derive_3.dat
rename to sample-data/310derive_3.dat
diff --git a/sampledata/310derive_3.hea b/sample-data/310derive_3.hea
similarity index 100%
rename from sampledata/310derive_3.hea
rename to sample-data/310derive_3.hea
diff --git a/sampledata/311derive.dat b/sample-data/311derive.dat
similarity index 100%
rename from sampledata/311derive.dat
rename to sample-data/311derive.dat
diff --git a/sampledata/311derive.hea b/sample-data/311derive.hea
similarity index 100%
rename from sampledata/311derive.hea
rename to sample-data/311derive.hea
diff --git a/sampledata/311derive_2.dat b/sample-data/311derive_2.dat
similarity index 100%
rename from sampledata/311derive_2.dat
rename to sample-data/311derive_2.dat
diff --git a/sampledata/311derive_2.hea b/sample-data/311derive_2.hea
similarity index 100%
rename from sampledata/311derive_2.hea
rename to sample-data/311derive_2.hea
diff --git a/sampledata/311derive_3.dat b/sample-data/311derive_3.dat
similarity index 100%
rename from sampledata/311derive_3.dat
rename to sample-data/311derive_3.dat
diff --git a/sampledata/311derive_3.hea b/sample-data/311derive_3.hea
similarity index 100%
rename from sampledata/311derive_3.hea
rename to sample-data/311derive_3.hea
diff --git a/sampledata/311derive_4.dat b/sample-data/311derive_4.dat
similarity index 100%
rename from sampledata/311derive_4.dat
rename to sample-data/311derive_4.dat
diff --git a/sampledata/311derive_4.hea b/sample-data/311derive_4.hea
similarity index 100%
rename from sampledata/311derive_4.hea
rename to sample-data/311derive_4.hea
diff --git a/sampledata/a103l.hea b/sample-data/a103l.hea
similarity index 100%
rename from sampledata/a103l.hea
rename to sample-data/a103l.hea
diff --git a/sampledata/a103l.mat b/sample-data/a103l.mat
similarity index 100%
rename from sampledata/a103l.mat
rename to sample-data/a103l.mat
diff --git a/sampledata/drive02.dat b/sample-data/drive02.dat
similarity index 100%
rename from sampledata/drive02.dat
rename to sample-data/drive02.dat
diff --git a/sampledata/drive02.hea b/sample-data/drive02.hea
similarity index 100%
rename from sampledata/drive02.hea
rename to sample-data/drive02.hea
diff --git a/sampledata/multisegment/fixed1/v102s.dat b/sample-data/multi-segment/fixed1/v102s.dat
similarity index 100%
rename from sampledata/multisegment/fixed1/v102s.dat
rename to sample-data/multi-segment/fixed1/v102s.dat
diff --git a/sampledata/multisegment/fixed1/v102s.hea b/sample-data/multi-segment/fixed1/v102s.hea
similarity index 100%
rename from sampledata/multisegment/fixed1/v102s.hea
rename to sample-data/multi-segment/fixed1/v102s.hea
diff --git a/sampledata/multisegment/fixed1/v102s_1.hea b/sample-data/multi-segment/fixed1/v102s_1.hea
similarity index 100%
rename from sampledata/multisegment/fixed1/v102s_1.hea
rename to sample-data/multi-segment/fixed1/v102s_1.hea
diff --git a/sampledata/multisegment/fixed1/v102s_2.hea b/sample-data/multi-segment/fixed1/v102s_2.hea
similarity index 100%
rename from sampledata/multisegment/fixed1/v102s_2.hea
rename to sample-data/multi-segment/fixed1/v102s_2.hea
diff --git a/sampledata/multisegment/fixed1/v102s_3.hea b/sample-data/multi-segment/fixed1/v102s_3.hea
similarity index 100%
rename from sampledata/multisegment/fixed1/v102s_3.hea
rename to sample-data/multi-segment/fixed1/v102s_3.hea
diff --git a/sampledata/multisegment/s00001/3248903_layout.hea b/sample-data/multi-segment/s00001/3248903_layout.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3248903_layout.hea
rename to sample-data/multi-segment/s00001/3248903_layout.hea
diff --git a/sampledata/multisegment/s00001/3248903n.dat b/sample-data/multi-segment/s00001/3248903n.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3248903n.dat
rename to sample-data/multi-segment/s00001/3248903n.dat
diff --git a/sampledata/multisegment/s00001/3975656_0001.dat b/sample-data/multi-segment/s00001/3975656_0001.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0001.dat
rename to sample-data/multi-segment/s00001/3975656_0001.dat
diff --git a/sampledata/multisegment/s00001/3975656_0001.hea b/sample-data/multi-segment/s00001/3975656_0001.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0001.hea
rename to sample-data/multi-segment/s00001/3975656_0001.hea
diff --git a/sampledata/multisegment/s00001/3975656_0002.dat b/sample-data/multi-segment/s00001/3975656_0002.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0002.dat
rename to sample-data/multi-segment/s00001/3975656_0002.dat
diff --git a/sampledata/multisegment/s00001/3975656_0002.hea b/sample-data/multi-segment/s00001/3975656_0002.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0002.hea
rename to sample-data/multi-segment/s00001/3975656_0002.hea
diff --git a/sampledata/multisegment/s00001/3975656_0003.dat b/sample-data/multi-segment/s00001/3975656_0003.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0003.dat
rename to sample-data/multi-segment/s00001/3975656_0003.dat
diff --git a/sampledata/multisegment/s00001/3975656_0003.hea b/sample-data/multi-segment/s00001/3975656_0003.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0003.hea
rename to sample-data/multi-segment/s00001/3975656_0003.hea
diff --git a/sampledata/multisegment/s00001/3975656_0004.dat b/sample-data/multi-segment/s00001/3975656_0004.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0004.dat
rename to sample-data/multi-segment/s00001/3975656_0004.dat
diff --git a/sampledata/multisegment/s00001/3975656_0004.hea b/sample-data/multi-segment/s00001/3975656_0004.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0004.hea
rename to sample-data/multi-segment/s00001/3975656_0004.hea
diff --git a/sampledata/multisegment/s00001/3975656_0005.dat b/sample-data/multi-segment/s00001/3975656_0005.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0005.dat
rename to sample-data/multi-segment/s00001/3975656_0005.dat
diff --git a/sampledata/multisegment/s00001/3975656_0005.hea b/sample-data/multi-segment/s00001/3975656_0005.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0005.hea
rename to sample-data/multi-segment/s00001/3975656_0005.hea
diff --git a/sampledata/multisegment/s00001/3975656_0006.dat b/sample-data/multi-segment/s00001/3975656_0006.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0006.dat
rename to sample-data/multi-segment/s00001/3975656_0006.dat
diff --git a/sampledata/multisegment/s00001/3975656_0006.hea b/sample-data/multi-segment/s00001/3975656_0006.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0006.hea
rename to sample-data/multi-segment/s00001/3975656_0006.hea
diff --git a/sampledata/multisegment/s00001/3975656_0007.dat b/sample-data/multi-segment/s00001/3975656_0007.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0007.dat
rename to sample-data/multi-segment/s00001/3975656_0007.dat
diff --git a/sampledata/multisegment/s00001/3975656_0007.hea b/sample-data/multi-segment/s00001/3975656_0007.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0007.hea
rename to sample-data/multi-segment/s00001/3975656_0007.hea
diff --git a/sampledata/multisegment/s00001/3975656_0008.dat b/sample-data/multi-segment/s00001/3975656_0008.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0008.dat
rename to sample-data/multi-segment/s00001/3975656_0008.dat
diff --git a/sampledata/multisegment/s00001/3975656_0008.hea b/sample-data/multi-segment/s00001/3975656_0008.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0008.hea
rename to sample-data/multi-segment/s00001/3975656_0008.hea
diff --git a/sampledata/multisegment/s00001/3975656_0009.dat b/sample-data/multi-segment/s00001/3975656_0009.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0009.dat
rename to sample-data/multi-segment/s00001/3975656_0009.dat
diff --git a/sampledata/multisegment/s00001/3975656_0009.hea b/sample-data/multi-segment/s00001/3975656_0009.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0009.hea
rename to sample-data/multi-segment/s00001/3975656_0009.hea
diff --git a/sampledata/multisegment/s00001/3975656_0010.dat b/sample-data/multi-segment/s00001/3975656_0010.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0010.dat
rename to sample-data/multi-segment/s00001/3975656_0010.dat
diff --git a/sampledata/multisegment/s00001/3975656_0010.hea b/sample-data/multi-segment/s00001/3975656_0010.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0010.hea
rename to sample-data/multi-segment/s00001/3975656_0010.hea
diff --git a/sampledata/multisegment/s00001/3975656_0011.dat b/sample-data/multi-segment/s00001/3975656_0011.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0011.dat
rename to sample-data/multi-segment/s00001/3975656_0011.dat
diff --git a/sampledata/multisegment/s00001/3975656_0011.hea b/sample-data/multi-segment/s00001/3975656_0011.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0011.hea
rename to sample-data/multi-segment/s00001/3975656_0011.hea
diff --git a/sampledata/multisegment/s00001/3975656_0012.dat b/sample-data/multi-segment/s00001/3975656_0012.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0012.dat
rename to sample-data/multi-segment/s00001/3975656_0012.dat
diff --git a/sampledata/multisegment/s00001/3975656_0012.hea b/sample-data/multi-segment/s00001/3975656_0012.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0012.hea
rename to sample-data/multi-segment/s00001/3975656_0012.hea
diff --git a/sampledata/multisegment/s00001/3975656_0013.dat b/sample-data/multi-segment/s00001/3975656_0013.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0013.dat
rename to sample-data/multi-segment/s00001/3975656_0013.dat
diff --git a/sampledata/multisegment/s00001/3975656_0013.hea b/sample-data/multi-segment/s00001/3975656_0013.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0013.hea
rename to sample-data/multi-segment/s00001/3975656_0013.hea
diff --git a/sampledata/multisegment/s00001/3975656_0014.dat b/sample-data/multi-segment/s00001/3975656_0014.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0014.dat
rename to sample-data/multi-segment/s00001/3975656_0014.dat
diff --git a/sampledata/multisegment/s00001/3975656_0014.hea b/sample-data/multi-segment/s00001/3975656_0014.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0014.hea
rename to sample-data/multi-segment/s00001/3975656_0014.hea
diff --git a/sampledata/multisegment/s00001/3975656_0015.dat b/sample-data/multi-segment/s00001/3975656_0015.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0015.dat
rename to sample-data/multi-segment/s00001/3975656_0015.dat
diff --git a/sampledata/multisegment/s00001/3975656_0015.hea b/sample-data/multi-segment/s00001/3975656_0015.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0015.hea
rename to sample-data/multi-segment/s00001/3975656_0015.hea
diff --git a/sampledata/multisegment/s00001/3975656_0016.dat b/sample-data/multi-segment/s00001/3975656_0016.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0016.dat
rename to sample-data/multi-segment/s00001/3975656_0016.dat
diff --git a/sampledata/multisegment/s00001/3975656_0016.hea b/sample-data/multi-segment/s00001/3975656_0016.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_0016.hea
rename to sample-data/multi-segment/s00001/3975656_0016.hea
diff --git a/sampledata/multisegment/s00001/3975656_layout.hea b/sample-data/multi-segment/s00001/3975656_layout.hea
similarity index 100%
rename from sampledata/multisegment/s00001/3975656_layout.hea
rename to sample-data/multi-segment/s00001/3975656_layout.hea
diff --git a/sampledata/multisegment/s00001/3975656n.dat b/sample-data/multi-segment/s00001/3975656n.dat
similarity index 100%
rename from sampledata/multisegment/s00001/3975656n.dat
rename to sample-data/multi-segment/s00001/3975656n.dat
diff --git a/sampledata/multisegment/s00001/s00001-2896-10-09-01-56.hea b/sample-data/multi-segment/s00001/s00001-2896-10-09-01-56.hea
similarity index 100%
rename from sampledata/multisegment/s00001/s00001-2896-10-09-01-56.hea
rename to sample-data/multi-segment/s00001/s00001-2896-10-09-01-56.hea
diff --git a/sampledata/multisegment/s00001/s00001-2896-10-09-01-56n.hea b/sample-data/multi-segment/s00001/s00001-2896-10-09-01-56n.hea
similarity index 100%
rename from sampledata/multisegment/s00001/s00001-2896-10-09-01-56n.hea
rename to sample-data/multi-segment/s00001/s00001-2896-10-09-01-56n.hea
diff --git a/sampledata/multisegment/s00001/s00001-2896-10-10-00-31.hea b/sample-data/multi-segment/s00001/s00001-2896-10-10-00-31.hea
similarity index 100%
rename from sampledata/multisegment/s00001/s00001-2896-10-10-00-31.hea
rename to sample-data/multi-segment/s00001/s00001-2896-10-10-00-31.hea
diff --git a/sampledata/multisegment/s00001/s00001-2896-10-10-00-31n.hea b/sample-data/multi-segment/s00001/s00001-2896-10-10-00-31n.hea
similarity index 100%
rename from sampledata/multisegment/s00001/s00001-2896-10-10-00-31n.hea
rename to sample-data/multi-segment/s00001/s00001-2896-10-10-00-31n.hea
diff --git a/sampledata/multisegment/s25047/3234460_0001.dat b/sample-data/multi-segment/s25047/3234460_0001.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0001.dat
rename to sample-data/multi-segment/s25047/3234460_0001.dat
diff --git a/sampledata/multisegment/s25047/3234460_0001.hea b/sample-data/multi-segment/s25047/3234460_0001.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0001.hea
rename to sample-data/multi-segment/s25047/3234460_0001.hea
diff --git a/sampledata/multisegment/s25047/3234460_0002.dat b/sample-data/multi-segment/s25047/3234460_0002.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0002.dat
rename to sample-data/multi-segment/s25047/3234460_0002.dat
diff --git a/sampledata/multisegment/s25047/3234460_0002.hea b/sample-data/multi-segment/s25047/3234460_0002.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0002.hea
rename to sample-data/multi-segment/s25047/3234460_0002.hea
diff --git a/sampledata/multisegment/s25047/3234460_0003.dat b/sample-data/multi-segment/s25047/3234460_0003.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0003.dat
rename to sample-data/multi-segment/s25047/3234460_0003.dat
diff --git a/sampledata/multisegment/s25047/3234460_0003.hea b/sample-data/multi-segment/s25047/3234460_0003.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0003.hea
rename to sample-data/multi-segment/s25047/3234460_0003.hea
diff --git a/sampledata/multisegment/s25047/3234460_0004.dat b/sample-data/multi-segment/s25047/3234460_0004.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0004.dat
rename to sample-data/multi-segment/s25047/3234460_0004.dat
diff --git a/sampledata/multisegment/s25047/3234460_0004.hea b/sample-data/multi-segment/s25047/3234460_0004.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0004.hea
rename to sample-data/multi-segment/s25047/3234460_0004.hea
diff --git a/sampledata/multisegment/s25047/3234460_0005.dat b/sample-data/multi-segment/s25047/3234460_0005.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0005.dat
rename to sample-data/multi-segment/s25047/3234460_0005.dat
diff --git a/sampledata/multisegment/s25047/3234460_0005.hea b/sample-data/multi-segment/s25047/3234460_0005.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0005.hea
rename to sample-data/multi-segment/s25047/3234460_0005.hea
diff --git a/sampledata/multisegment/s25047/3234460_0006.dat b/sample-data/multi-segment/s25047/3234460_0006.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0006.dat
rename to sample-data/multi-segment/s25047/3234460_0006.dat
diff --git a/sampledata/multisegment/s25047/3234460_0006.hea b/sample-data/multi-segment/s25047/3234460_0006.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0006.hea
rename to sample-data/multi-segment/s25047/3234460_0006.hea
diff --git a/sampledata/multisegment/s25047/3234460_0007.dat b/sample-data/multi-segment/s25047/3234460_0007.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0007.dat
rename to sample-data/multi-segment/s25047/3234460_0007.dat
diff --git a/sampledata/multisegment/s25047/3234460_0007.hea b/sample-data/multi-segment/s25047/3234460_0007.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0007.hea
rename to sample-data/multi-segment/s25047/3234460_0007.hea
diff --git a/sampledata/multisegment/s25047/3234460_0008.dat b/sample-data/multi-segment/s25047/3234460_0008.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0008.dat
rename to sample-data/multi-segment/s25047/3234460_0008.dat
diff --git a/sampledata/multisegment/s25047/3234460_0008.hea b/sample-data/multi-segment/s25047/3234460_0008.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0008.hea
rename to sample-data/multi-segment/s25047/3234460_0008.hea
diff --git a/sampledata/multisegment/s25047/3234460_0009.dat b/sample-data/multi-segment/s25047/3234460_0009.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0009.dat
rename to sample-data/multi-segment/s25047/3234460_0009.dat
diff --git a/sampledata/multisegment/s25047/3234460_0009.hea b/sample-data/multi-segment/s25047/3234460_0009.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0009.hea
rename to sample-data/multi-segment/s25047/3234460_0009.hea
diff --git a/sampledata/multisegment/s25047/3234460_0010.dat b/sample-data/multi-segment/s25047/3234460_0010.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0010.dat
rename to sample-data/multi-segment/s25047/3234460_0010.dat
diff --git a/sampledata/multisegment/s25047/3234460_0010.hea b/sample-data/multi-segment/s25047/3234460_0010.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0010.hea
rename to sample-data/multi-segment/s25047/3234460_0010.hea
diff --git a/sampledata/multisegment/s25047/3234460_0011.dat b/sample-data/multi-segment/s25047/3234460_0011.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0011.dat
rename to sample-data/multi-segment/s25047/3234460_0011.dat
diff --git a/sampledata/multisegment/s25047/3234460_0011.hea b/sample-data/multi-segment/s25047/3234460_0011.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0011.hea
rename to sample-data/multi-segment/s25047/3234460_0011.hea
diff --git a/sampledata/multisegment/s25047/3234460_0012.dat b/sample-data/multi-segment/s25047/3234460_0012.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0012.dat
rename to sample-data/multi-segment/s25047/3234460_0012.dat
diff --git a/sampledata/multisegment/s25047/3234460_0012.hea b/sample-data/multi-segment/s25047/3234460_0012.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0012.hea
rename to sample-data/multi-segment/s25047/3234460_0012.hea
diff --git a/sampledata/multisegment/s25047/3234460_0013.dat b/sample-data/multi-segment/s25047/3234460_0013.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0013.dat
rename to sample-data/multi-segment/s25047/3234460_0013.dat
diff --git a/sampledata/multisegment/s25047/3234460_0013.hea b/sample-data/multi-segment/s25047/3234460_0013.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0013.hea
rename to sample-data/multi-segment/s25047/3234460_0013.hea
diff --git a/sampledata/multisegment/s25047/3234460_0014.dat b/sample-data/multi-segment/s25047/3234460_0014.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0014.dat
rename to sample-data/multi-segment/s25047/3234460_0014.dat
diff --git a/sampledata/multisegment/s25047/3234460_0014.hea b/sample-data/multi-segment/s25047/3234460_0014.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0014.hea
rename to sample-data/multi-segment/s25047/3234460_0014.hea
diff --git a/sampledata/multisegment/s25047/3234460_0015.dat b/sample-data/multi-segment/s25047/3234460_0015.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0015.dat
rename to sample-data/multi-segment/s25047/3234460_0015.dat
diff --git a/sampledata/multisegment/s25047/3234460_0015.hea b/sample-data/multi-segment/s25047/3234460_0015.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0015.hea
rename to sample-data/multi-segment/s25047/3234460_0015.hea
diff --git a/sampledata/multisegment/s25047/3234460_0016.dat b/sample-data/multi-segment/s25047/3234460_0016.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0016.dat
rename to sample-data/multi-segment/s25047/3234460_0016.dat
diff --git a/sampledata/multisegment/s25047/3234460_0016.hea b/sample-data/multi-segment/s25047/3234460_0016.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0016.hea
rename to sample-data/multi-segment/s25047/3234460_0016.hea
diff --git a/sampledata/multisegment/s25047/3234460_0017.dat b/sample-data/multi-segment/s25047/3234460_0017.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0017.dat
rename to sample-data/multi-segment/s25047/3234460_0017.dat
diff --git a/sampledata/multisegment/s25047/3234460_0017.hea b/sample-data/multi-segment/s25047/3234460_0017.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0017.hea
rename to sample-data/multi-segment/s25047/3234460_0017.hea
diff --git a/sampledata/multisegment/s25047/3234460_0018.dat b/sample-data/multi-segment/s25047/3234460_0018.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0018.dat
rename to sample-data/multi-segment/s25047/3234460_0018.dat
diff --git a/sampledata/multisegment/s25047/3234460_0018.hea b/sample-data/multi-segment/s25047/3234460_0018.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_0018.hea
rename to sample-data/multi-segment/s25047/3234460_0018.hea
diff --git a/sampledata/multisegment/s25047/3234460_layout.hea b/sample-data/multi-segment/s25047/3234460_layout.hea
similarity index 100%
rename from sampledata/multisegment/s25047/3234460_layout.hea
rename to sample-data/multi-segment/s25047/3234460_layout.hea
diff --git a/sampledata/multisegment/s25047/3234460n.dat b/sample-data/multi-segment/s25047/3234460n.dat
similarity index 100%
rename from sampledata/multisegment/s25047/3234460n.dat
rename to sample-data/multi-segment/s25047/3234460n.dat
diff --git a/sampledata/multisegment/s25047/s25047-2704-05-04-10-44.hea b/sample-data/multi-segment/s25047/s25047-2704-05-04-10-44.hea
similarity index 100%
rename from sampledata/multisegment/s25047/s25047-2704-05-04-10-44.hea
rename to sample-data/multi-segment/s25047/s25047-2704-05-04-10-44.hea
diff --git a/sampledata/multisegment/s25047/s25047-2704-05-04-10-44n.hea b/sample-data/multi-segment/s25047/s25047-2704-05-04-10-44n.hea
similarity index 100%
rename from sampledata/multisegment/s25047/s25047-2704-05-04-10-44n.hea
rename to sample-data/multi-segment/s25047/s25047-2704-05-04-10-44n.hea
diff --git a/sampledata/p10143.dat b/sample-data/p10143.dat
similarity index 100%
rename from sampledata/p10143.dat
rename to sample-data/p10143.dat
diff --git a/sampledata/p10143.hea b/sample-data/p10143.hea
similarity index 100%
rename from sampledata/p10143.hea
rename to sample-data/p10143.hea
diff --git a/sampledata/s0010_re.dat b/sample-data/s0010_re.dat
similarity index 100%
rename from sampledata/s0010_re.dat
rename to sample-data/s0010_re.dat
diff --git a/sampledata/s0010_re.hea b/sample-data/s0010_re.hea
similarity index 100%
rename from sampledata/s0010_re.hea
rename to sample-data/s0010_re.hea
diff --git a/sampledata/s0010_re.xyz b/sample-data/s0010_re.xyz
similarity index 100%
rename from sampledata/s0010_re.xyz
rename to sample-data/s0010_re.xyz
diff --git a/sampledata/test01_00s.dat b/sample-data/test01_00s.dat
similarity index 100%
rename from sampledata/test01_00s.dat
rename to sample-data/test01_00s.dat
diff --git a/sampledata/test01_00s.hea b/sample-data/test01_00s.hea
similarity index 100%
rename from sampledata/test01_00s.hea
rename to sample-data/test01_00s.hea
diff --git a/sampledata/test01_00s_frame.hea b/sample-data/test01_00s_frame.hea
similarity index 100%
rename from sampledata/test01_00s_frame.hea
rename to sample-data/test01_00s_frame.hea
diff --git a/sampledata/test01_00s_skew.hea b/sample-data/test01_00s_skew.hea
similarity index 100%
rename from sampledata/test01_00s_skew.hea
rename to sample-data/test01_00s_skew.hea
diff --git a/sampledata/test01_00s_skewframe.hea b/sample-data/test01_00s_skewframe.hea
similarity index 100%
rename from sampledata/test01_00s_skewframe.hea
rename to sample-data/test01_00s_skewframe.hea
diff --git a/sampledata/v102s.dat b/sample-data/v102s.dat
similarity index 100%
rename from sampledata/v102s.dat
rename to sample-data/v102s.dat
diff --git a/sampledata/v102s.hea b/sample-data/v102s.hea
similarity index 100%
rename from sampledata/v102s.hea
rename to sample-data/v102s.hea
diff --git a/setup.py b/setup.py
index 27e9caea..f0988f61 100644
--- a/setup.py
+++ b/setup.py
@@ -62,6 +62,7 @@
'requests>=2.10.0',
'pandas>=0.19.1',
'scipy>=0.19.0',
+        'scikit-learn>=0.19.0',
],
# List additional groups of dependencies here (e.g. development
diff --git a/tests/targetoutputdata/anntarget1 b/tests/target-output/anntarget1
similarity index 100%
rename from tests/targetoutputdata/anntarget1
rename to tests/target-output/anntarget1
diff --git a/tests/targetoutputdata/anntarget2 b/tests/target-output/anntarget2
similarity index 100%
rename from tests/targetoutputdata/anntarget2
rename to tests/target-output/anntarget2
diff --git a/tests/targetoutputdata/anntarget3 b/tests/target-output/anntarget3
similarity index 100%
rename from tests/targetoutputdata/anntarget3
rename to tests/target-output/anntarget3
diff --git a/tests/targetoutputdata/target1a b/tests/target-output/target1a
similarity index 100%
rename from tests/targetoutputdata/target1a
rename to tests/target-output/target1a
diff --git a/tests/targetoutputdata/target1b b/tests/target-output/target1b
similarity index 100%
rename from tests/targetoutputdata/target1b
rename to tests/target-output/target1b
diff --git a/tests/targetoutputdata/target1c b/tests/target-output/target1c
similarity index 100%
rename from tests/targetoutputdata/target1c
rename to tests/target-output/target1c
diff --git a/tests/targetoutputdata/target1d b/tests/target-output/target1d
similarity index 100%
rename from tests/targetoutputdata/target1d
rename to tests/target-output/target1d
diff --git a/tests/targetoutputdata/target2a b/tests/target-output/target2a
similarity index 100%
rename from tests/targetoutputdata/target2a
rename to tests/target-output/target2a
diff --git a/tests/targetoutputdata/target2b b/tests/target-output/target2b
similarity index 100%
rename from tests/targetoutputdata/target2b
rename to tests/target-output/target2b
diff --git a/tests/targetoutputdata/target2c b/tests/target-output/target2c
similarity index 100%
rename from tests/targetoutputdata/target2c
rename to tests/target-output/target2c
diff --git a/tests/targetoutputdata/target2d b/tests/target-output/target2d
similarity index 100%
rename from tests/targetoutputdata/target2d
rename to tests/target-output/target2d
diff --git a/tests/targetoutputdata/target2e b/tests/target-output/target2e
similarity index 100%
rename from tests/targetoutputdata/target2e
rename to tests/target-output/target2e
diff --git a/tests/targetoutputdata/target3a b/tests/target-output/target3a
similarity index 100%
rename from tests/targetoutputdata/target3a
rename to tests/target-output/target3a
diff --git a/tests/targetoutputdata/target3b b/tests/target-output/target3b
similarity index 100%
rename from tests/targetoutputdata/target3b
rename to tests/target-output/target3b
diff --git a/tests/targetoutputdata/target4a b/tests/target-output/target4a
similarity index 100%
rename from tests/targetoutputdata/target4a
rename to tests/target-output/target4a
diff --git a/tests/targetoutputdata/target4b b/tests/target-output/target4b
similarity index 100%
rename from tests/targetoutputdata/target4b
rename to tests/target-output/target4b
diff --git a/tests/targetoutputdata/target4c b/tests/target-output/target4c
similarity index 100%
rename from tests/targetoutputdata/target4c
rename to tests/target-output/target4c
diff --git a/tests/targetoutputdata/target4d b/tests/target-output/target4d
similarity index 100%
rename from tests/targetoutputdata/target4d
rename to tests/target-output/target4d
diff --git a/tests/targetoutputdata/target5a b/tests/target-output/target5a
similarity index 100%
rename from tests/targetoutputdata/target5a
rename to tests/target-output/target5a
diff --git a/tests/targetoutputdata/target5b b/tests/target-output/target5b
similarity index 100%
rename from tests/targetoutputdata/target5b
rename to tests/target-output/target5b
diff --git a/tests/targetoutputdata/target5c b/tests/target-output/target5c
similarity index 100%
rename from tests/targetoutputdata/target5c
rename to tests/target-output/target5c
diff --git a/tests/targetoutputdata/target5d b/tests/target-output/target5d
similarity index 100%
rename from tests/targetoutputdata/target5d
rename to tests/target-output/target5d
diff --git a/tests/test_annotations.py b/tests/test_annotation.py
similarity index 85%
rename from tests/test_annotations.py
rename to tests/test_annotation.py
index cf52deac..0cb95c1c 100644
--- a/tests/test_annotations.py
+++ b/tests/test_annotation.py
@@ -2,14 +2,14 @@
import re
import wfdb
-class test_rdann():
+class test_annotation():
# Test 1 - Annotation file 100.atr
- # Target file created with: rdann -r sampledata/100 -a atr > anntarget1
+ # Target file created with: rdann -r sample-data/100 -a atr > anntarget1
def test_1(self):
# Read data using WFDB python package
- annotation = wfdb.rdann('sampledata/100', 'atr')
+ annotation = wfdb.rdann('sample-data/100', 'atr')
# This is not the fault of the script. The annotation file specifies a
@@ -20,7 +20,7 @@ def test_1(self):
# no null to detect in the output text file of rdann.
# Target data from WFDB software package
- lines = tuple(open('tests/targetoutputdata/anntarget1', 'r'))
+ lines = tuple(open('tests/target-output/anntarget1', 'r'))
nannot = len(lines)
target_time = [None] * nannot
@@ -53,12 +53,12 @@ def test_1(self):
annotation.aux_note == target_aux_note]
# Test file streaming
- pbannotation = wfdb.rdann('100', 'atr', pbdir = 'mitdb', return_label_elements=['label_store', 'symbol'])
+ pbannotation = wfdb.rdann('100', 'atr', pb_dir='mitdb', return_label_elements=['label_store', 'symbol'])
pbannotation.aux_note[0] = '(N'
pbannotation.create_label_map()
# Test file writing
- annotation.wrann(writefs=True)
+ annotation.wrann(write_fs=True)
writeannotation = wfdb.rdann('100', 'atr', return_label_elements=['label_store', 'symbol'])
writeannotation.create_label_map()
@@ -67,14 +67,14 @@ def test_1(self):
assert annotation.__eq__(writeannotation)
# Test 2 - Annotation file 12726.anI with many aux_note strings.
- # Target file created with: rdann -r sampledata/100 -a atr > anntarget2
+    # Target file created with: rdann -r sample-data/12726 -a anI > anntarget2
def test_2(self):
# Read data from WFDB python package
- annotation = wfdb.rdann('sampledata/12726', 'anI')
+ annotation = wfdb.rdann('sample-data/12726', 'anI')
# Target data from WFDB software package
- lines = tuple(open('tests/targetoutputdata/anntarget2', 'r'))
+ lines = tuple(open('tests/target-output/anntarget2', 'r'))
nannot = len(lines)
target_time = [None] * nannot
@@ -106,11 +106,11 @@ def test_2(self):
np.array_equal(annotation.num, target_num),
annotation.aux_note == target_aux_note]
# Test file streaming
- pbannotation = wfdb.rdann('12726', 'anI', pbdir = 'prcp', return_label_elements=['label_store', 'symbol'])
+ pbannotation = wfdb.rdann('12726', 'anI', pb_dir='prcp', return_label_elements=['label_store', 'symbol'])
pbannotation.create_label_map()
# Test file writing
- annotation.wrann(writefs=True)
+ annotation.wrann(write_fs=True)
writeannotation = wfdb.rdann('12726', 'anI', return_label_elements=['label_store', 'symbol'])
writeannotation.create_label_map()
@@ -119,14 +119,14 @@ def test_2(self):
assert annotation.__eq__(writeannotation)
# Test 3 - Annotation file 1003.atr with custom annotation types
- # Target file created with: rdann -r sampledata/1003 -a atr > anntarget3
+ # Target file created with: rdann -r sample-data/1003 -a atr > anntarget3
def test_3(self):
# Read data using WFDB python package
- annotation = wfdb.rdann('sampledata/1003', 'atr')
+ annotation = wfdb.rdann('sample-data/1003', 'atr')
# Target data from WFDB software package
- lines = tuple(open('tests/targetoutputdata/anntarget3', 'r'))
+ lines = tuple(open('tests/target-output/anntarget3', 'r'))
nannot = len(lines)
target_time = [None] * nannot
@@ -159,11 +159,11 @@ def test_3(self):
annotation.aux_note == target_aux_note]
# Test file streaming
- pbannotation = wfdb.rdann('1003', 'atr', pbdir = 'challenge/2014/set-p2', return_label_elements=['label_store', 'symbol'])
+ pbannotation = wfdb.rdann('1003', 'atr', pb_dir='challenge/2014/set-p2', return_label_elements=['label_store', 'symbol'])
pbannotation.create_label_map()
# Test file writing
- annotation.wrann(writefs=True)
+ annotation.wrann(write_fs=True)
writeannotation = wfdb.rdann('1003', 'atr', return_label_elements=['label_store', 'symbol'])
writeannotation.create_label_map()
diff --git a/tests/test_io.py b/tests/test_io.py
new file mode 100644
index 00000000..bcbaa0bc
--- /dev/null
+++ b/tests/test_io.py
@@ -0,0 +1,393 @@
+import wfdb
+import numpy as np
+import os
+
+
+class test_record():
+ # Target files created using the original WFDB Software Package
+ # version 10.5.24
+
+ # ----------------------- 1. Basic Tests -----------------------#
+
+ # Format 16/Entire signal/Digital
+ # Target file created with: rdsamp -r sample-data/test01_00s | cut -f 2- >
+ # target1a
+ def test_1a(self):
+ record = wfdb.rdrecord('sample-data/test01_00s', physical=False)
+ sig = record.d_signal
+ targetsig = np.genfromtxt('tests/target-output/target1a')
+
+ # Compare data streaming from physiobank
+ pbrecord = wfdb.rdrecord('test01_00s', physical=False,
+ pb_dir='macecgdb')
+
+ # Test file writing
+ record2 = wfdb.rdrecord('sample-data/test01_00s', physical=False)
+ record2.sig_name = ['ECG_1', 'ECG_2', 'ECG_3', 'ECG_4']
+ record2.wrsamp()
+ recordwrite = wfdb.rdrecord('test01_00s', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(pbrecord)
+ assert record2.__eq__(recordwrite)
+
+ # Format 16 with byte offset/Selected Duration/Selected Channels/Physical
+ # Target file created with: rdsamp -r sample-data/a103l -f 50 -t 160 -s 2 0
+ # -P | cut -f 2- > target1b
+ def test_1b(self):
+ sig, fields = wfdb.rdsamp('sample-data/a103l',
+ sampfrom=12500, sampto=40000, channels=[2, 0])
+ siground = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target1b')
+
+ # Compare data streaming from physiobank
+ pbsig, pbfields = wfdb.rdsamp('a103l',
+ pb_dir='challenge/2015/training',
+ sampfrom=12500, sampto=40000,
+ channels=[2, 0])
+ assert np.array_equal(siground, targetsig)
+ assert np.array_equal(sig, pbsig) and fields == pbfields
+
+ # Format 16 with byte offset/Selected Duration/Selected Channels/Digital
+ # Target file created with: rdsamp -r sample-data/a103l -f 80 -s 0 1 | cut
+ # -f 2- > target1c
+ def test_1c(self):
+ record = wfdb.rdrecord('sample-data/a103l',
+ sampfrom=20000, channels=[0, 1], physical=False)
+ sig = record.d_signal
+ targetsig = np.genfromtxt('tests/target-output/target1c')
+
+ # Compare data streaming from physiobank
+ pbrecord = wfdb.rdrecord('a103l', pb_dir='challenge/2015/training',
+ sampfrom=20000, channels=[0, 1],
+ physical=False)
+
+ # Test file writing
+ record.wrsamp()
+ recordwrite = wfdb.rdrecord('a103l', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(pbrecord)
+ assert record.__eq__(recordwrite)
+
+ # Format 80/Selected Duration/Selected Channels/Physical
+ # Target file created with: rdsamp -r sample-data/3000003_0003 -f 1 -t 8 -s
+ # 1 -P | cut -f 2- > target1d
+ def test_1d(self):
+ sig, fields = wfdb.rdsamp('sample-data/3000003_0003',
+ sampfrom=125, sampto=1000, channels=[1])
+ siground = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target1d')
+ targetsig = targetsig.reshape(len(targetsig), 1)
+
+ # Compare data streaming from physiobank
+ pbsig, pbfields = wfdb.rdsamp('3000003_0003',
+ pb_dir='mimic2wdb/30/3000003/',
+ sampfrom=125, sampto=1000, channels=[1])
+
+ assert np.array_equal(siground, targetsig)
+ assert np.array_equal(sig, pbsig) and fields == pbfields
+
+
+ # -------------------- 2. Special format tests ------------------ #
+
+ # Format 212/Entire signal/Physical
+ # Target file created with: rdsamp -r sample-data/100 -P | cut -f 2- >
+ # target2a
+ def test_2a(self):
+ sig, fields = wfdb.rdsamp('sample-data/100')
+ siground = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target2a')
+
+ # Compare data streaming from physiobank
+ pbsig, pbfields = wfdb.rdsamp('100', pb_dir = 'mitdb')
+ # This comment line was manually added and is not present in the
+ # original physiobank record
+ del(fields['comments'][0])
+
+ assert np.array_equal(siground, targetsig)
+ assert np.array_equal(sig, pbsig) and fields == pbfields
+
+ # Format 212/Selected Duration/Selected Channel/Digital.
+ # Target file created with: rdsamp -r sample-data/100 -f 0.002 -t 30 -s 1 |
+ # cut -f 2- > target2b
+ def test_2b(self):
+ record = wfdb.rdrecord('sample-data/100', sampfrom=1,
+ sampto=10800, channels=[1], physical=False)
+ sig = record.d_signal
+ targetsig = np.genfromtxt('tests/target-output/target2b')
+ targetsig = targetsig.reshape(len(targetsig), 1)
+
+ # Compare data streaming from physiobank
+ pbrecord = wfdb.rdrecord('100', sampfrom=1, sampto=10800, channels=[1],
+ physical=False, pb_dir = 'mitdb')
+ # This comment line was manually added and is not present in the
+ # original physiobank record
+ del(record.comments[0])
+
+ # Test file writing
+ record.wrsamp()
+ recordwrite = wfdb.rdrecord('100', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(pbrecord)
+ assert record.__eq__(recordwrite)
+
+
+ # Format 212/Entire signal/Physical for odd sampled record
+ # Target file created with: rdsamp -r sample-data/100_3chan -P | cut -f 2- >
+ # target2c
+ def test_2c(self):
+ record = wfdb.rdrecord('sample-data/100_3chan')
+ siground = np.round(record.p_signal, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target2c')
+
+ # Test file writing
+ record.d_signal = record.adc()
+ record.wrsamp()
+ recordwrite = wfdb.rdrecord('100_3chan')
+ record.d_signal = None
+
+ assert np.array_equal(siground, targetsig)
+ assert record.__eq__(recordwrite)
+
+
+ # Format 310/Selected Duration/Digital
+ # Target file created with: rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 | cut -f 2- | wrsamp -o 310derive -O 310
+ # rdsamp -r 310derive -f 0.007 | cut -f 2- > target2d
+ def test_2d(self):
+ record = wfdb.rdrecord('sample-data/310derive', sampfrom=2,
+ physical=False)
+ sig = record.d_signal
+ targetsig = np.genfromtxt('tests/target-output/target2d')
+ assert np.array_equal(sig, targetsig)
+
+ # Format 311/Selected Duration/Physical
+ # Target file created with: rdsamp -r sample-data/3000003_0003 -f 0 -t 8.21 -s 1 | cut -f 2- | wrsamp -o 311derive -O 311
+ # rdsamp -r 311derive -f 0.005 -t 3.91 -P | cut -f 2- > target2e
+ def test_2e(self):
+ sig, fields = wfdb.rdsamp('sample-data/311derive', sampfrom=1,
+ sampto=978)
+ sig = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target2e')
+ targetsig = targetsig.reshape([977, 1])
+ assert np.array_equal(sig, targetsig)
+
+
+ # ------------------- 3. Multi-dat file tests -------------------- #
+
+ # Multi-dat/Entire signal/Digital
+ # Target file created with: rdsamp -r sample-data/s0010_re | cut -f 2- >
+ # target3a
+ def test_3a(self):
+ record= wfdb.rdrecord('sample-data/s0010_re', physical=False)
+ sig = record.d_signal
+ targetsig = np.genfromtxt('tests/target-output/target3a')
+
+ # Compare data streaming from physiobank
+ pbrecord= wfdb.rdrecord('s0010_re', physical=False,
+ pb_dir='ptbdb/patient001')
+
+ # Test file writing
+ record.wrsamp()
+ recordwrite = wfdb.rdrecord('s0010_re', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(pbrecord)
+ assert record.__eq__(recordwrite)
+
+ # Multi-dat/Selected Duration/Selected Channels/Physical
+ # Target file created with: rdsamp -r sample-data/s0010_re -f 5 -t 38 -P -s
+ # 13 0 4 8 3 | cut -f 2- > target3b
+ def test_3b(self):
+ sig, fields = wfdb.rdsamp('sample-data/s0010_re', sampfrom=5000,
+ sampto=38000, channels=[13, 0, 4, 8, 3])
+ siground = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target3b')
+
+ # Compare data streaming from physiobank
+ pbsig, pbfields = wfdb.rdsamp('s0010_re', sampfrom=5000,
+ pb_dir='ptbdb/patient001',
+ sampto=38000, channels=[13, 0, 4, 8, 3])
+
+ assert np.array_equal(siground, targetsig)
+ assert np.array_equal(sig, pbsig) and fields == pbfields
+
+
+ # ------------------- 4. Skew and multiple samples/frame tests ------------------- #
+
+ # Format 16 multi-samples per frame and skew digital
+ # Target file created with: rdsamp -r sample-data/test01_00s_skewframe | cut
+ # -f 2- > target4a
+ def test_4a(self):
+ record = wfdb.rdrecord('sample-data/test01_00s_skewframe',
+ physical=False)
+ sig = record.d_signal
+ # The WFDB library rdsamp does not return the final N samples for all
+ # channels due to the skew. The WFDB python rdsamp does return the final
+ # N samples, filling in NANs for end of skewed channels only.
+ sig = sig[:-3, :]
+
+ targetsig = np.genfromtxt('tests/target-output/target4a')
+
+ # Test file writing. Multiple samples per frame and skew.
+ # Have to read all the samples in the record, ignoring skew
+ recordnoskew = wfdb.rdrecord('sample-data/test01_00s_skewframe',
+ physical=False,
+ smooth_frames=False, ignore_skew=True)
+ recordnoskew.wrsamp(expanded=True)
+ # Read the written record
+ recordwrite = wfdb.rdrecord('test01_00s_skewframe', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(recordwrite)
+
+ # Format 12 multi-samples per frame and skew/Entire Signal/Digital
+ # Target file created with: rdsamp -r sample-data/03700181 | cut -f 2- >
+ # target4b
+ def test_4b(self):
+ record = wfdb.rdrecord('sample-data/03700181', physical=False)
+ sig = record.d_signal
+ # The WFDB library rdsamp does not return the final N samples for all
+ # channels due to the skew.
+ sig = sig[:-4, :]
+ # The WFDB python rdsamp does return the final N samples, filling in
+ # NANs for end of skewed channels only.
+ targetsig = np.genfromtxt('tests/target-output/target4b')
+
+ # Compare data streaming from physiobank
+ pbrecord = wfdb.rdrecord('03700181', physical=False,
+ pb_dir='mimicdb/037')
+
+ # Test file writing. Multiple samples per frame and skew.
+ # Have to read all the samples in the record, ignoring skew
+ recordnoskew = wfdb.rdrecord('sample-data/03700181', physical=False,
+ smooth_frames=False, ignore_skew=True)
+ recordnoskew.wrsamp(expanded=True)
+ # Read the written record
+ recordwrite = wfdb.rdrecord('03700181', physical=False)
+
+ assert np.array_equal(sig, targetsig)
+ assert record.__eq__(pbrecord)
+ assert record.__eq__(recordwrite)
+
+ # Format 12 multi-samples per frame and skew/Selected Duration/Selected Channels/Physical
+ # Target file created with: rdsamp -r sample-data/03700181 -f 8 -t 128 -s 0
+ # 2 -P | cut -f 2- > target4c
+ def test_4c(self):
+ sig, fields = wfdb.rdsamp('sample-data/03700181',
+ channels=[0, 2], sampfrom=1000, sampto=16000)
+ siground = np.round(sig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target4c')
+
+ # Compare data streaming from physiobank
+ pbsig, pbfields = wfdb.rdsamp('03700181', pb_dir = 'mimicdb/037',
+ channels=[0, 2], sampfrom=1000, sampto=16000)
+
+ # Test file writing. Multiple samples per frame and skew.
+ # Have to read all the samples in the record, ignoring skew
+ recordnoskew = wfdb.rdrecord('sample-data/03700181', physical=False,
+ smooth_frames=False, ignore_skew=True)
+ recordnoskew.wrsamp(expanded=True)
+ # Read the written record
+ writesig, writefields = wfdb.rdsamp('03700181', channels=[0, 2],
+ sampfrom=1000, sampto=16000)
+
+ assert np.array_equal(siground, targetsig)
+ assert np.array_equal(sig, pbsig) and fields == pbfields
+ assert np.array_equal(sig, writesig) and fields == writefields
+
+
+ # Format 16 multi-samples per frame and skew, read expanded signals
+ # Target file created with: rdsamp -r sample-data/test01_00s_skewframe -P -H | cut
+ # -f 2- > target4d
+ def test_4d(self):
+ record = wfdb.rdrecord('sample-data/test01_00s_skewframe',
+ smooth_frames=False)
+
+ # Upsample the channels with lower samples/frame
+ expandsig = np.zeros((7994, 3))
+ expandsig[:,0] = np.repeat(record.e_p_signal[0][:-3],2)
+ expandsig[:,1] = record.e_p_signal[1][:-6]
+ expandsig[:,2] = np.repeat(record.e_p_signal[2][:-3],2)
+
+ siground = np.round(expandsig, decimals=8)
+ targetsig = np.genfromtxt('tests/target-output/target4d')
+
+ assert np.array_equal(siground, targetsig)
+
+
+ # ------------------------ 5. Multi-segment Tests ------------------------ #
+
+ # Multi-segment variable layout/Selected duration. All samples contained in one segment.
+ # Target file created with:
+ # rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428365 -t s14428375 -P | cut -f 2- > target5a
+ def test_5a(self):
+ record=wfdb.rdrecord('sample-data/multi-segment/s00001/s00001-2896-10-10-00-31',
+ sampfrom=14428365, sampto=14428375)
+ siground=np.round(record.p_signal, decimals=8)
+ targetsig=np.genfromtxt('tests/target-output/target5a')
+
+ np.testing.assert_equal(siground, targetsig)
+
+ # Multi-segment variable layout/Selected duration. Samples read from >1 segment
+ # Target file created with:
+ # rdsamp -r sample-data/multi-segment/s00001/s00001-2896-10-10-00-31 -f s14428364 -t s14428375 -P | cut -f 2- > target5b
+ def test_5b(self):
+ record=wfdb.rdrecord('sample-data/multi-segment/s00001/s00001-2896-10-10-00-31',
+ sampfrom=14428364, sampto=14428375)
+ siground=np.round(record.p_signal, decimals=8)
+ targetsig=np.genfromtxt('tests/target-output/target5b')
+
+ np.testing.assert_equal(siground, targetsig)
+
+ # Multi-segment fixed layout entire signal.
+ # Target file created with: rdsamp -r sample-data/multi-segment/fixed1/v102s -P | cut -f 2- > target5c
+ def test_5c(self):
+ record=wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s')
+ siground=np.round(record.p_signal, decimals=8)
+ targetsig=np.genfromtxt('tests/target-output/target5c')
+
+ np.testing.assert_equal(siground, targetsig)
+
+ # Multi-segment fixed layout/selected duration. All samples contained in one segment
+ # Target file created with: rdsamp -r sample-data/multi-segment/fixed1/v102s -t s75000 -P | cut -f 2- > target5d
+ def test_5d(self):
+ record=wfdb.rdrecord('sample-data/multi-segment/fixed1/v102s',
+ sampto=75000)
+ siground=np.round(record.p_signal, decimals=8)
+ targetsig=np.genfromtxt('tests/target-output/target5d')
+
+ np.testing.assert_equal(siground, targetsig)
+
+ # Test 11 - Multi-segment variable layout/Entire signal/Physical
+ # Target file created with: rdsamp -r sample-data/matched/s25047/s25047-2704-05-04-10-44 -P | cut -f 2- > target11
+ # def test_11(self):
+ #sig, fields=rdsamp('sample-data/matched/s25047/s25047-2704-05-04-10-44')
+ #sig=np.round(sig, decimals=8)
+ # targetsig=np.genfromtxt('tests/target-output/target11')
+ #assert np.array_equal(sig, targetsig)
+
+ # Test 12 - Multi-segment variable layout/Selected duration/Selected Channels/Physical
+ # Target file created with: rdsamp -r sample-data/matched/s00001/s00001-2896-10-10-00-31 -f s -t 4000 -s 3 0 -P | cut -f 2- > target12
+ #def test_12(self):
+ # record=rdsamp('sample-data/matched/s00001/s00001-2896-10-10-00-31', sampfrom=8750, sampto=500000)
+ # siground=np.round(record.p_signal, decimals=8)
+ # targetsig=np.genfromtxt('tests/target-output/target12')
+ #
+ # assert np.array_equal(sig, targetsig)
+
+
+ # Cleanup written files
+ @classmethod
+ def tearDownClass(self):
+
+ writefiles = ['03700181.dat','03700181.hea','100.atr','100.dat',
+ '100.hea','1003.atr','100_3chan.dat','100_3chan.hea',
+ '12726.anI','a103l.hea','a103l.mat','s0010_re.dat',
+ 's0010_re.hea','s0010_re.xyz','test01_00s.dat',
+ 'test01_00s.hea','test01_00s_skewframe.hea']
+
+ for file in writefiles:
+ if os.path.isfile(file):
+ os.remove(file)
diff --git a/tests/test_processing.py b/tests/test_processing.py
index 27f11d82..a236d7bb 100644
--- a/tests/test_processing.py
+++ b/tests/test_processing.py
@@ -1,81 +1,122 @@
-import wfdb
import numpy
-class test_processing():
+import wfdb
+from wfdb import processing
+
- def test_1(self):
- sig, fields = wfdb.srdsamp('sampledata/100')
- ann = wfdb.rdann('sampledata/100', 'atr')
+class test_processing():
+ """
+ Test processing functions
+ """
+ def test_resample_single(self):
+ sig, fields = wfdb.rdsamp('sample-data/100')
+ ann = wfdb.rdann('sample-data/100', 'atr')
fs = fields['fs']
fs_target = 50
- new_sig, new_ann = wfdb.processing.resample_singlechan(sig[:, 0], ann, fs, fs_target)
+ new_sig, new_ann = processing.resample_singlechan(sig[:, 0], ann, fs, fs_target)
expected_length = int(sig.shape[0]*fs_target/fs)
assert new_sig.shape[0] == expected_length
- def test_2(self):
- sig, fields = wfdb.srdsamp('sampledata/100')
- ann = wfdb.rdann('sampledata/100', 'atr')
+ def test_resample_multi(self):
+ sig, fields = wfdb.rdsamp('sample-data/100')
+ ann = wfdb.rdann('sample-data/100', 'atr')
fs = fields['fs']
fs_target = 50
- new_sig, new_ann = wfdb.processing.resample_multichan(sig, ann, fs, fs_target)
+ new_sig, new_ann = processing.resample_multichan(sig, ann, fs, fs_target)
expected_length = int(sig.shape[0]*fs_target/fs)
assert new_sig.shape[0] == expected_length
assert new_sig.shape[1] == sig.shape[1]
- def test_3(self):
- sig, _ = wfdb.srdsamp('sampledata/100')
+ def test_normalize_bound(self):
+ sig, _ = wfdb.rdsamp('sample-data/100')
lb = -5
ub = 15
- x = wfdb.processing.normalize(sig[:, 0], lb, ub)
+ x = processing.normalize_bound(sig[:, 0], lb, ub)
assert x.shape[0] == sig.shape[0]
assert numpy.min(x) >= lb
assert numpy.max(x) <= ub
- def test_4(self):
+ def test_find_peaks(self):
+ x = [0, 2, 1, 0, -10, -15, -15, -15, 9, 8, 0, 0, 1, 2, 10]
+ hp, sp = processing.find_peaks(x)
+ assert numpy.array_equal(hp, [1, 8])
+ assert numpy.array_equal(sp, [6, 10])
+
+ def test_find_peaks_empty(self):
x = []
- hp, sp = wfdb.processing.find_peaks(x)
+ hp, sp = processing.find_peaks(x)
assert hp.shape == (0,)
assert sp.shape == (0,)
- def test_5(self):
- x = [0, 2, 1, 0, -10, -15, -15, -15, 9, 8, 0, 0, 1, 2, 10]
- hp, sp = wfdb.processing.find_peaks(x)
- assert numpy.array_equal(hp, [1, 8])
- assert numpy.array_equal(sp, [6, 10])
+ def test_gqrs(self):
+
+ record = wfdb.rdrecord('sample-data/100', channels=[0],
+ sampfrom=9998, sampto=19998, physical=False)
+
+ expected_peaks = [271, 580, 884, 1181, 1469, 1770, 2055, 2339, 2634,
+ 2939, 3255, 3551, 3831, 4120, 4412, 4700, 5000, 5299,
+ 5596, 5889, 6172, 6454, 6744, 7047, 7347, 7646, 7936,
+ 8216, 8503, 8785, 9070, 9377, 9682]
+
+ peaks = processing.gqrs_detect(d_sig=record.d_signal[:,0],
+ fs=record.fs,
+ adc_gain=record.adc_gain[0],
+ adc_zero=record.adc_zero[0],
+ threshold=1.0)
+
+ assert numpy.array_equal(peaks, expected_peaks)
- def test_6(self):
- x = [1183,1168,1111,1037,974,935,917,914,922,928,934,934,935,936,933,934,934,936,936,935,934,933,935,937,938,936,933,935,935,936,937,932,932,933,931,934,935,930,927,931,932,932,934,934,930,932,933,936,937,935,934,937,936,935,935,934,934,936,937,936,935,933,929,932,934,937,936,935,933,934,937,937,934,934,932,933,936,938,939,937,932,931,935,936,936,930,928,926,928,929,927,926,925,926,926,926,925,925,922,925,925,929,929,931,929,931,938,942,943,945,943,945,950,953,951,950,950,948,950,953,952,950,950,953,956,959,959,956,955,952,955,955,953,953,951,950,951,950,950,949,949,951,952,953,953,953,949,948,947,951,951,949,948,947,947,950,948,944,942,944,947,949,948,946,942,944,945,946,946,946,944,944,946,947,946,945,945,944,944,947,946,944,944,946,948,950,950,948,943,946,946,948,948,948,946,948,948,949,948,946,942,943,945,945,947,943,943,947,950,957,959,959,955,957,959,960,960,962,962,964,966,968,967,965,963,965,966,967,966,964,961,962,962,966,969,968,964,960,956,956,950,945,940,941,939,941,942,940,939,939,938,941,942,939,936,935,937,940,942,940,937,939,939,941,940,938,937,934,930,926,920,917,913,906,901,900,913,935,959,989,1029,1077,1132,1171,1194,1195,1160,1089,1007,946,920,913,918,926,933,933,931,931,934,936,934,935,931,936,934,937,936,934,932,930,933,933,932,929,927,929,931,933,932,932,932,932,931,934,933,932,928,930,932,931,933,932,930,933,934,933,933,933,930,928,930,933,932,930,928,930,931,933,932,932,931,930,933,935,935,934,933,934,936,939,937,936,934,933,933,934,935,932,932,930,931,932,932,929,926,928,928,929,927,926,924,926,927,926,926,928,924,924,922,928,926,926,927,929,931,935,937,939,939,940,946,948,949,950,950,950,954,957,954,954,953,957,960,960,961,959,957,958,960,960,962,958,954,955,958,958,956,956,953,953,955,957,955,954,952,954,952,954,954,954,951,950,955,958,955,954,953,953,953,956,957,951,947,947,951,951,951,951,947,945,947,949,951,948,946,949,948,951,950,950,948,948,953,951,951,950,948,949,951,951,950,950,946,947,951,952,951,949,947,946,948,950,949,946,947,
947,949,950,949,947,943,947,950,950,952,949,950,949,953,956,955,956,955,959,962,963,963,964,963,964,967,971,971,970,968,969,969,971,971,969,965,966,966,970,969,967,964,965,970,976,975,972,965,962,961,959,954,951,947,948,946,950,950,949,948,946,947,946,948,946,944,943,946,947,947,945,941,943,944,947,944,944,940,940,937,934,925,920,914,913,911,908,899,892,902,923,945,976,1007,1047,1101,1156,1197,1215,1206,1156,1078,996,939,920,921,926,931,935,938,937,937,934,933,934,934,936,935,933,932,932,934,934,935,934,931,933,936,937,937,936,932,933,937,936,933,934,932,932,934,935,936,933,931,933,935,936,937,935,933,936,939,938,938,935,934,938,937,939,938,937,934,934,934,937,934,936,933,935,936,937,934,933,933,936,936,940,940,938,935,935,939,939,940,940,936,937,938,939,937,935,932,933,935,937,937,934,933,933,935,937,934,933,935,933,936,938,941,939,938,942,945,948,951,951,950,950,954,956,957,955,953,955,956,958,956,957,953,956,957,960,959,957,953,958,959,959,959,959,954,957,959,960,959,957,957,956,959,959,958,957,955,953,956,955,956,953,952,951,952,953,953,951,950,951,951,953,953,952,950,950,950,952,951,951,947,947,947,949,948,946,944,944,947,947,947,946,944,943,946,948,948,946,943,945,946,947,948,948,946,949,951,951,949,949,947,947,947,951,949,948,946,947,949,952,950,948,946,946,949,951,949,952,950,953,960,962,964,964,962,962,967,970,973,969,969,970,969,970,968,965,962,963,965,967,967,965,961,961,962,964,966,965,967,971,971,966,963,959,953,951,949,948,947,947,943,944,947,947,945,944,940,942,943,944,943,942,939,940,940,944,943,941,938,938,937,933,925,916,912,906,898,898,911,935,959,986,1024,1073,1128,1168,1191,1195,1172,1122,1049,986,939,917,907,907,914,923,928,932,934,936,936,937,934,934,934,935,937,934,933,932,931,937,935,934,934,932,933,935,935,931,930,934,933,939,938,938,933,936,937,939,937,937,935,934,936,936,937,936,936,935,939,940,941,939,935,934,937,939,938,934,934,936,934,939,937,935,934,935,936,939,938,934,934,935,938,938,937,937,935,935,938,939,938,935,937,938,938,939,94
0,939,936,936,937,937,937,936,933,933,937,939,938,938,936,938,938,940,940,938,939,941,947,949,951,951,950,953,951,954,956,955,955,954,953,955,957,957,955,955,956,958,957,953,952,952,953,955,956,955,954,953,956,956,956,954,949,952,953,956,955,952,947,949,951,952,951,947,945,945,948,948,949,948,944,945,949,949,947,945,944,942,946,947,948,946,941,942,944,945,947,944,940,939,941,942,942,938,938,938,940,940,941,940,940,941,941,944,946,944,942,941,943,944,944,942,940,940,942,944,941,940,938,939,942,945,948,949,950,953,955,957,957,957,957,962,964,963,965,966,964,964,965,965,963,961,958,959,959,960,961,958,953,955,960,965,963,959,949,948,947,945,943,940,937,938,940,943,944,941,937,936,934,938,936,935,934,933,934,937,936,934,932,935,936,937,938,937,932,926,922,919,914,905,895,895,913,937,963,997,1045,1104,1155,1183,1182,1144,1070,995,944,920,912,914,923,931,932,933,933,934,931,932,931,930,930,930,926,927,930,931,932,931,930,928,929,931,933,929,928,932,934,936,935,935,932,933,933,931,933,928,931,931,933,933,932,929,928,930,931,933,931,934,933,934,937,938,936,935,933,933,934,936,937,933,931,933,936,936,936,936,932,933,934,938,937,933,929,929,930,932,930,931,927,926,931,932,932,929,926,924,925,927,930,927,924,924,926,929,929,931,928,933,935,939,942,943,940,940,944,949,952,952,949,952,955,959,959,956,955,952,953,953,953,951,952,953,958,957,954,955,952,953,950,956,957,957,955,954,954,956,955,952,951,952,954,954,954,951,948,949,950,950,951,951,949,948,949,950,949,946,942,944,946,951,950,949,946,947,949,951,949,945,943,943,945,946,944,944,944,943,943,946,946,945,944,942,944,947,949,945,945,945,947,948,947,948,948,948,950,948,950,948,947,944,949,951,955,953,954,954,958,961,962,963,961,961,964,969,968,967,966,967,970,970,969,969,967,966,965,967,967,967,964,964,965,966,970,970,967,964,961,960,958,952,946,945,941,945,944,945,943,941,944,943,944,943,943,942,943,944,942,940,938,939,940,943,940,941,937,935,930,924,920,915,905,899,906,926,956,985,1021,1073,1128,1173,1201,1200,1160,1089,100
8,953,924,914,917,927,933,936,935,936,935,936,938,939,941,937,934,935,938,939,938,936,932,933,932,933,936,934,933,932,934,937,938,936,932,932,934,936,937,937,936,936,939,939,938,937,935,935,936,937,933,936,933,935,934,936,936,935,934,935,937,938,939,938,936,937,939,938,941,937,934,936,938,939,940,937,935,936,937,938,936,933,932,930,933,933,931,930,927,927,929,929,929,930,928,929,929,932,931,930,931,931,934,935,938,937,937,939,944,948,951,951,946,951,951,956,955,958,954,957,960,959,960,961,957,958,960,961,962,961,959,958,961,960,960,960,957,955,958,958,959,957,955,957,959,959,957,957,955,955,955,958,959,956,957,957,959,961,961,957,956,956,958,956,959,957,953,954,956,957,956,956,952,951,953,954,955,955,953,949,955,956,953,954,951,954,953,956,954,954,954,953,958,957,957,956,952,954,956,957,955,955,952,952,953,955,954,949,947,952,953,956,955,954,951,952,955,956,956,956,955,957,959,966,968,967,965,966,969,969,972,974,972,975,978,978,976,976,974,971,974,975,977,974,968,970,976,980,982,977,967,966,961,962,962,956,952,955,955,956,951,953,948,951,953,952,953,952,949,948,950,951,952,952,949,949,949,953,951,951,946,941,938,930,924,919,908,904,912,935,963,992,1023,1070,1131,1183,1221,1234,1219,1167,1078,995,942,920,924,937,946,946,944,942,940,941,943,942,941,940,937,937,939,940,939,938,937,938,939,942,941,940,938,937,938,939,938,936,936,936,938,940,939,939,937,938,937,938,939,938,937,939,940,940,940,940,936,937,938,939,937,936,935,937,937,939,939,937,935,937,935,940,939,938,936,937,940,940,940,941,938,938,938,941,939,937,934,935,936,937,936,937,935,936,936,939,939,939,938,937,942,945,945,946,947,946,948,953,956,954,955,956,960,962,964,963,958,959,960,960,963,959,959,961,960,961,961,960,958,959,960,961,962,961,958,960,958,960,960,956,954,955,957,957,956,957,955,952,955,957,955,953,950,952,953,955,953,953,951,953,953,954,953,952,948,950,951,952,951,948,948,947,950,951,952,951,949,949,950,952,953,951,950,950,952,953,954,953,949,953,953,956,956,953,951,951,954,954,954,953,951,950,9
54,955,955,954,952,954,959,960,965,965,962,967,968,971,974,972,971,973,976,979,978,975,971,971,971,970,973,972,969,967,969,972,979,978,974,967,964,967,964,959,955,955,957,959,960,958,954,953,952,953,952,951,949,948,949,950,950,950,945,945,947,946,946,948,946,948,947,950,948,944,939,932,924,923,916,905,901,917,946,979,1019,1071,1126,1173,1194,1176,1114,1028,954,918,919,932,941,945,941,941,942,946,947,944,939,939,941,941,943,939,939,940,940,940,942,940,939,939,942,944,945,944,942,941,943,945,947,943,942,944,943,946,946,946,944,943,945,944,944,944,940,941,944,945,944,942,942,944,944,947,945,943,941,942,943,947,946,945,943,946,947,946,945,942,941,940,943,945,944,941,938,938,940,942,940,937,939,938,939,941,941,939,939,940,943,945,944,943,939,942,945,946,947,948,949,950,953,954,955,954,952,954,958,958,961,956,953,955,958,959,959,959,955,956,959,959,959,956,951,954,954,953,951,948,948,949,952,954,953,949,948,951,949,952,951,950,950,949,951,952,952,949,950,948,948,948,949,943,944,945,947,950,948,947,943,943,945,944,943,942,943,943,944,946,947,945,942,941,944,944,946,945,941,944,944,944,944,944,940,942,941,943,947,946,945,946,949,949,948,946,945,948,948,955,957,955,957,955,959,960,960,957,955,958,962,962,964,962,961,964,966,966,963,963,962,961,963,964,960,960,961,964,967,965,958,955,948,945,944,945,940,937,935,935,936,938,939,937,935,937,939,941,939,936,935,934,937,938,939,939,935,934,935,936,930,923,915,910,905,895,890,896,910,930,950,966,986,1010,1041,1084,1127,1164,1188,1192,1162,1091,1016,957,920,906,908,918,926,929,930,927,926,928,929,930,929,928,924,923,923,927,929,927,924,924,927,930,927,925,925,925,928,928,932,932,930,929,928,929,926,926,923,924,927,928,927,928,924,925,928,929,929,930,926,929,928,930,927,928,926,927,926,926,925,924,921,921,925,927,931,929,926,925,924,925,924,922,923,922,924,923,922,919,915,916,918,918,917,914,912,915,916,917,918,916,915,914,918,919,921,924,922,926,931,933,936,936,934,939,942,944,943,942,943,944,947,947,948,946,943,944,947,948,948,948
,944,945,948,951,952,947,944,944,945,948,947,946,945,945,946,948,947,945,943,944,943,945,943,943,940,942,943,944,943,940,937,939,939,942,940,939,938,938,939,944,941,939,938,937,940,941,940,939,938,937,939,939,938,937,935,935,937,939,939,936,935,936,938,939,941,937,936,937,939,940,941,937,938,937,940,941,939,939,938,937,940,940,941,940,937,938,941,946,948,947,947,949,948,954,957,957,958,957,960,963,966,964,959,959,959,960,957,958,955,955,956,955,957,959,959,960,956,955,952,948,942,940,941,942,941,938,937,937,937,937,938,937,934,932,934,937,936,935,933,933,934,934,935,935,930,931,933,935,934,935,932,930,926,920,916,911,902,896,896,911,935,961,992,1036,1092,1144,1181,1194,1172,1115,1040,972,927,910,908,913,923,925,929,927,927,926,927,930,929,926,925,924,923,926,925,924,925,925,925,929,929,926,925,926,928,928,928,926,924,924,928,929,927,928,923,927,928,931,928,926,925,926,927,927,929,926,925,924,926,928,927,925,924,927,927,929,928,927,925,924,926,927,927,925,924,925,927,929,930,927,926,928,924,927,929,926,923,925,924,925,924,920,918,919,921,923,921,921,918,919,922,923,923,923,918,920,921,926,926,925,923,926,932,935,938,938,938,943,942,946,944,945,944,946,947,949,949,948,949,949,951,951,951,951,946,949,950,951,951,950,946,948,951,951,950,946,946,946,948,950,948,947,944,945,946,947,946,945,943,942,945,946,944,943,945,943,946,948,948,947,945,945,945,945,946,944,943,944,944,945,945,944,940,943,941,946,943,943,941,941,943,945,945,943,940,942,944,947,947,945,941,942,944,947,946,946,942,942,945,943,945,942,941,942,943,944,945,944,941,942,945,946,945,947,948,951,955,960,961,961,956,956,961,962,962,960,961,961,965,966,966,967,963,963,964,964,962,959,957,959,960,962,961,961,962,968,970,967,961,956,951,951,949,947,946,939,937,939,941,942,943,941,938,938,940,941,942,941,936,938,941,942,940,939,937,938,937,939,936,931,921,914,913,910,903,896,894,905,930,959,988,1023,1070,1127,1175,1201,1210,1191,1137,1056,984,941,917,912,912,920,927,935,937,936,935,933,933,934,933,930,927,927,928,92
9,928,928,926,926,926,928,930,928,925,926,928,932,932,933,928,928,932,933,933,932,926,929,929,931,934,931,929,930,931,933,934,933,930,929,933,935,935,934,930,931,933,934,935,932,929,929,931,934,934,929,930,930,930,932,934,930,930,929,931,934,935,931,930,930,933,931,934,932,931,929,932,933,932,930,928,928,929,931,931,929,929,927,930,933,932,931,930,929,934,936,938,938,937,940,945,947,949,950,950,951,953,956,954,952,949,952,954,955,955,952,951,951,953,953,953,952,949,951,951,953,954,952,952,953,953,957,955,954,950,950,950,952,951,948,947,947,951,951,952,951,948,948,950,953,953,951,950,949,951,954,953,948,947,948,951,950,948,949,944,946,946,949,949,946,944,943,945,945,945,944,944,943,946,949,949,946,945,947,947,947,948,948,944,946,946,949,948,946,944,946,946,947,947,946,945,946,946,949,948,949,947,947,948,949,950,948,949,948,951,955,956,954,955,959,961,961,963,961,961,963,966,968,970,970,967,968,968,969,969,966,964,966,967,969,969,966,967,971,974,972,966,961,956,950,949,950,950,946,944,944,946,947,946,943,941,943,943,944,947,945,941,942,944,945,943,940,940,939,941,942,941,936,927,920,916,914,908,898,888,885,890,895,905,919,931,946,958,979,1014,1061,1112,1158,1186,1189,1156,1095,1017,954,924,918,923,930,933,934,934,937,938,936,934,933,933,936,937,936,934,934,936,939,939,939,933,933,938,939,936,936,934,935,937,939,940,938,937,938,937,938,938,935,934,934,937,938,939,939,937,937,941,940,938,937,935,935,936,936,936,936,936,934,937,938,939,940,936,935,940,940,940,939,937,939,942,942,940,938,936,937,935,941,938,940,937,937,938,939,939,938,935,936,936,939,936,935,932,932,935,935,937,935,933,932,937,938,937,936,935,936,941,942,946,945,945,945,948,952,951,949,946,947,953,955,955,956,952,952,952,957,956,955,952,952,953,954,954,951,950,948,949,953,952,951,950,951,953,955,954,953,949,948,951,954,953,951,949,950,951,950,950,949,947,946,948,948,949,948,946,945,946,948,949,947,943,946,945,949,946,946,942,943,944,947,946,943,943,944,945,948,948,947,946,946,947,948,945,944,943,941,945,9
46,946,949,946,946,948,948,947,946,942,944,946,948,947,946,948,952,954,959,958,958,958,955,962,965,967,968,968,969,971,969,970,969,964,965,967,965,964,959,959,963,967,967,962,956,950,948,946,948,948,947,942,943,945,946,946,942,938,941,940,943,943,943,943,942,943,942,941,939,939,941,941,943,943,942,939,937,935,927,920,915,908,901,903,919,948,978,1014,1065,1124,1170,1191,1175,1120,1039,969,930,915,915,923,930,933,936,936,935,935,934,935,934,936,934,932,931,935,935,934,933,931,930,932,934,933,933,931,932,936,937,936,934,933,934,935,935,935,933,933,935,936,936,933,929,931,931,933,936,936,935,935,936,936,937,936,933,931,930,929,934,935,936,934,932,934,937,937,933,932,931,932,934,933,933,931,933,933,935,931,930,928,927,926,927,928,925,921,921,923,927,928,925,923,925,925,929,932,931,931,935,938,942,941,942,942,943,948,951,951,950,950,952,953,955,955,958,955,955,957,956,957,958,955,956,957,960,958,957,954,955,958,960,958,956,955,954,957,959,956,955,952,950,956,958,956,956,953,954,952,956,957,954,952,950,952,953,954,951,949,952,950,952,952,950,948,948,950,954,954,953,952,954,954,956,955,953,951,951,952,954,952,951,949,948,949,952,953,950,948,951,954,953,953,953,950,952,954,957,955,954,953,955,959,962,962,963,963,963,967,969,971,971,972,975,976,979,976,977,975,972,976,978,975,973,974,973,972,972,976,976,975,974,970,967,963,958,955,955,953,953,952,951,951,950,954,954,953,951,949,949,952,953,953,952,948,947,949,950,947,942,935,926,923,919,908,905,915,942,972,1007,1048,1102,1151,1193,1210,1196,1139,1056,981,933,918,916,917,914,917,922,932,942,942,939,938,938,939,942,942,939,938,939,940,943,942,942,942,940,943,945,945,942,941,940,942,944,942,940,940,939,941,941,943,941,940,938,939,943,942,942,940,941,944,947,945,945,943,943,944,946,945,943,940,940,943,946,944,942,940,939,942,944,945,943,942,940,944,943,943,942,939,941,941,944,943,941,938,940,942,940,939,940,936,938,937,938,938,937,936,938,940,942,942,945,947,948,952,956,958,957,957,958,960,960,960,960,962,961,964,965,965,963,963,
963,965,967,964,965,965,964,966,968,965,963,962,960,964,964,962,963,960,959,959,961,963,961,960,958,959,963,959,960,959,957,961,960,960,958,956,955,955,959,959,955,954,954,955,959,958,956,956,957,957,960,959,960,956,957,958,956,957,956,955,953,956,958,956,956,955,954,957,955,957,956,957,955,958,960,959,960,957,954,956,959,960,957,955,958,958,963,963,967,967,969,968,971,972,973,974,975,979,982,986,980,980,978,978,980,977,975,972,974,976,978,978,975,970,967,973,978,981,982,979,972,967,968,963,961,957,955,957,957,956,956,953,953,953,955,951,947,948,947,949,951,950,949,948,948,949,950,949,946,939,933,927,927,920,913,907,918,947,977,1005,1047,1097,1151,1192,1209,1196,1143,1063,980,935,923,928,937,941,943,946,946,945,944,942,941,944,945,943,943,943,943,945,948,946,943,943,942,943,945,945,943,942,943,943,946,944,943,940,939,943,944,945,945,944,944,944,947,946,945,945,944,947,947,945,945,944,944,945,946,945,944,942,942,943,946,947,945,943,942,944,948,947,946,945,945,947,947,946,944,945,944,946,947,945,943,943,940,942,945,943,942,939,940,939,944,943,943,945,943,945,947,945,944,945,945,948,951,949,950,950,949,953,958,957,955,954,957,960,963,962,961,960,960,962,963,962,965,963,964,966,967,966,965,964,963,962,962,963,964,961,958,960,963,962,960,961,956,960,961,959,958,958,957,961,961,962,960,960,960,958,960,960,958,956,958,957,957,958,956,955,954,955,958,957,955,954,954,954,957,956,956,952,953,952,955,956,954,953,954,955,957,958,955,954,953,954,957,957,957,955,953,953,954,957,956,954,952,955,959,957,955,954,956,959,963,966,971,973,969,972,974,971,971,973,974,974,979,979,976,974,974,974,975,975,974,975,973,974,975,971,971,969,969,975,982,983,977,969,964,966,964,958,955,954,951,953,956,954,952,949,950,950,952,951,949,948,947,952,952,952,952,948,946,945,949,943,934,926,920,915,910,904,911,928,957,990,1032,1087,1138,1170,1178,1155,1100,1024,962,927,920,929,940,942,939,939,938,938,942,940,936,937,935,936,939,941,939,936,933,934,938,939,938,935,936,938,941,941,938,936,935,937,941,940
,936,937,935,939,940,938,938,936,937,937,939,938,936,936,935,936,939,938,938,937,937,937,938,937,937,938,939,940,943,940,938,937,935,939,942,940,937,936,935,937,938,938,936,932,929,934,936,936,936,929,932,934,936,936,934,932,930,931,934,934,931,933,932,934,937,937,938,937,938,943,943,947,947,947,946,948,950,949,950,952,951,952,955,952,954,954,953,954,954,956,953,949,948,951,954,952,950,950,948,947,950,949,946,944,945,947,952,951,949,949,947,948,947,947,946,945,944,946,948,949,947,945,941,943,946,946,943,942,941,943,944,942,946,944,943,945,945,944,943,942,942,945,945,945,944,941,941,943,946,946,944,945,943,945,945,944,941,940,938,940,941,942,942,941,943,946,949,947,946,946,946,947,948,951,950,950,950,955,957,956,956,956,957,960,963,963,964,962,959,961,962,960,959,961,960,959,960,959,958,960,963,963,960,952,945,946,941,942,943,940,937,935,933,937,939,940,937,938,933,935,938,938,937,933,934,936,938,941,935,931,934,935,934,925,917,913,909,903,896,898,909,932,959,994,1041,1094,1141,1171,1178,1150,1092,1015,952,920,910,913,922,925,926,924,926,929,928,930,928,927,928,927,930,929,930,928,927,932,933,930,930,929,927,931,933,934,932,932,931,930,932,930,927,923,924,927,928,927,928,928,927,929,931,931,930,930,929,930,930,929,929,926,928,927,929,929,926,925,925,927,928,930,926,926,926,927,930,929,927,927,924,928,929,929,928,924,923,925,927,926,920,919,918,919,921,920,916,917,916,917,919,918,918,920,922,924,929,928,929,929,931,934,936,938,940,940,939,944,947,947,947,946,949,949,950,952,950,949,950,953,955,954,954,951,951,952,953,952,950,949,949,949,952,950,948,944,944,946,948,948,949,947,947,949,951,952,950,946,947,948,949,950,946,944,943,945,948,947,946,945,943,946,946,943,944,944,943,944,945,945,943,941,942,942,944,944,944,941,942,944,947,945,944,941,943,944,947,947,946,945,944,946,947,947,945,944,944,945,944,944,942,943,943,946,946,947,947,944,948,948,955,956,958,959,960,961,962,962,962,961,963,969,968,969,970,967,965,965,967,966,962,960,962,964,967,964,961,962,966,969,972,968
,960,958,954,954,953,949,947,944,938,941,945,947,943,941,942,942,944,944,942,943,941,944,945,944,943,942,943,945,945,944,943,942,940,940,941,928,920,916,915,914,905,901,906,926,955,986,1026,1074,1127,1169,1194,1199,1162,1088,1002,943,918,921,933,938,940,940,936,938,938,939,938,935,934,936,940,939,937,938,935,938,941,939,940,936,935,938,940,940,939,938,935,940,943,940,938,936,935,938,941,938,936,936,936,938,938,940,938,936,934,936,940,939,940,938,936,939,940,944,942,940,938,940,943,942,938,936,937,940,941,938,938,937,937,937,939,937,937,936,936,937,939,939,935,936,933,935,936,936,934,937,935,937,938,938,938,936,936,938,940,941,941,942,943,947,950,951,951,951,954,955,959,959,959,956,957,961,964,964,961,964,963,963,965,962,962,961,959,961,964,962,963,960,960,962,962,962,962,960,959,960,964,966,962,960,959,964,964,964,963,959,957,959,962,959,958,958,956,958,961,960,958,956,956,957,959,960,957,957,956,956,959,957,956,954,952,953,957,957,953,953,952,954,954,955,954,952,954,956,957,956,956,955,954,955,959,959,961,956,956,959,958,958,957,957,955,956,958,958,955,954,956,961,963,967,967,968,967,970,975,973,968,967,966,970,973,975,976,972,974,977,978,979,975,975,973,974,973,976,974,973,969,973,976,979,980,978,972,967,966,965,960,955,954,955,956,955,951,950,949,951,951,951,952,950,949,949,949,951,949,949,947,950,952,953,950,949,944,940,935,928,923,916,909,905,907,920,943,967,995,1034,1087,1141,1178,1194,1187,1144,1080,1004,950,925,924,933,943,946,944,942,942,946,949,947,945,941,942,944,945,947,946,943,942,946,945,946,944,939,942,945,946,944,944,943,942,945,945,946,943,944,945,945,947,946,945,943,944,947,949,947,946,943,944,946,944,943,942,944,941,943,946,947,944,942,942,946,948,950,947,945,946,949,947,946,946,945,942,945,948,944,944,944,942,942,944,942,944,941,939,941,944,943,943,941,940,943,945,946,944,941,944,946,948,949,947,948,947,951,952,955,955,955,956,958,962,962,961,959,960,963,965,965,965,964,962,965,967,967,965,962,963,963,965,966,966,963,963,965,966,963,962,960,959,9
60,964,961,961,959,959,960,962,961,958,956,957,959,961,960,959,958,956,959,962,961,959,958,957,955,957,957,956,955,955,955,957,957,956,953,955,954,957,958,957,956,954,957,958,957,956,955,953,955,958,957,956,954,952,953,955,956,954,955,953,955,958,959,957,957,955,958,959,959,958,958,962,966,970,971,968,964,965,970,976,977,977,977,977,978,981,980,978,978,973,975,977,976,975,970,969,970,972,975,978,979,979,984,984,978,971,965,963,962,962,960,956,955,952,953,956,956,954,952,952,951,952,949,948,947,948,949,950,949,950,949,947,951,950,947,944,936,932,929,929,920,908,903,915,935,961,988,1024,1071,1123,1163,1182,1176,1136,1073,1002,952,929,922,926,931,936,937,941,939,941,939,937,945,947,947,944,942,940,942,943,942,944,940,937,940,943,943,941,941,940,940,944,942,941,939,938,941,941,942,941,940,940,941,945,944,943,941,941,940,941,939,939,937,937,939,941,941,940,939,938,940,942,942,942,941,939,943,944,944,942,941,941,943,945,947,945,945,941,945,946,945,943,941,939,941,941,941,939,937,935,935,935,935,936,934,937,938,939,937,935,935,938,939,940,939,941,941,942,943,949,950,951,951,952,955,956,958,957,957,958,958,961,961,961,961,959,960,960,964,960,958,957,960,963,961,959,957,956,956,960,957,957,955,954,958,958,958,957,957,955,955,959,955,958,956,955,956,954,956,956,954,950,953,956,955,954,951,950,950,953,952,953,952,949,950,953,955,953,951,952,952,952,951,949,948,950,952,956,958,956,956,951,953,954,952,950,949,950,951,952,953,952,953,950,953,956,955,953,952,951,953,958,958,960,963,964,969,970,967,965,967,970,972,974,975,975,973,971,974,975,971,971,969,966,967,967,967,966,968,969,967,967,964,958,954,949,952,954,953,952,949,951,952,951,948,948,946,944,946,948,948,948,944,944,946,948,948,948,946,946,947,949,945,938,930,926,923,917,908,906,921,947,979,1012,1055,1107,1151,1179,1188,1166,1099,1022,964,932,922,925,933,939,942,938,942,945,946,944,942,939,941,942,942,941,940,939,941,942,942,942,941,938,938,939,940,939,938,937,939,939,939,937,936,937,939,940,940,941,939,938,940,941,938,940
,937,937,939,939,938,938,934,936,938,943,940,937,938,937,937,940,939,937,939,940,941,942,942,938,938,936,937,939,938,933,933,930,931,931,931,930,926,927,930,931,931,931,929,929,932,931,931,929,931,929,931,934,936,937,938,941,945,946,947,951,951,952,957,959,958,955,958,959,960,962,961,960,960,957,960,961,962,960,961,960,959,960,960,959,958,957,958,963,961,958,957,956,954,958,959,959,954,954,955,958,960,955,956,954,956,959,958,956,954,952,953,955,954,953,952,950,952,956,955,953,953,953,954,956,955,953,953,953,951,953,953,952,949,951,953,954,954,953,951,950,952,953,953,953,952,952,952,957,957,953,952,951,955,957,955,952,952,952,955,963,965,967,967,967,969,971,972,976,975,973,978,978,975,973,972,973,972,973,973,971,970,968,967,969,975,977,977,970,966,966,962,958,954,953,953,953,952,950,947,947,950,950,950,949,947,945,950,952,951,950,949,949,950,951,950,943,933,924,922,920,910,905,913,935,969,1003,1044,1096,1150,1190,1221,1228,1205,1155,1098,1045,1008,980,956,937,922,910,908,920,932,941,941,939,941,943,941,940,937,937,938,940,939,939,937,937,941,943,942,941,942,942,941,944,943,940,938,937,940,943,939,938,938,939,941,942,943,940,939,938,941,943,943,942,940,939,942,944,941,944,942,942,944,946,946,943,942,939,941,943,944,942,939,940,941,943,942,944,940,941,939,943,940,939,938,936,941,940,939,940,936,939,939,939,939,938,937,935,939,941,941,941,942,944,945,950,952,952,955,955,955,958,961,959,958,957,960,963,963,962,959,958,961,960,962,960,961,959,960,962,962,961,961,959,962,963,962,960,959,959,960,961,959,959,956,956,957,957,958,956,954,955,956,957,958,957,955,955,955,957,956,955,953,954,954,956,955,952,951,950,953,953,952,951,950,949,953,954,952,951,951,951,954,955,954,955,951,952,952,953,951,952,952,951,951,954,955,953,953,952,954,957,956,957,957,957,962,966,970,968,965,965,967,970,972,973,973,972,974,973,975,976,974,972,972,973,972,971,970,969,971,972,974,976,978,971,967,965,957,958,954,950,952,953,952,951,949,948,952,952,951,951,949,948,950,948,949,949,946,947,949,948,947
,941,930,923,920,917,909,905,918,941,974,1011,1057,1112,1158,1192,1211,1222,1206,1162,1100,1024,963,929,921,923,930,935,941,941,940,940,939,937,941,942,942,941,941,940,942,942,944,942,939,937,940,941,943,942,938,939,939,944,941,942,941,939,941,943,943,942,942,942,942,946,947,946,943,942,945,946,945,943,944,943,942,945,946,945,944,943,944,946,946,943,943,941,944,946,945,944,941,942,946,947,944,944,940,943,944,944,945,944,940,937,941,945,944,940,939,940,941,944,943,941,940,940,943,943,945,945,945,945,947,949,950,953,950,952,955,957,960,957,957,956,959,960,961,959,957,957,961,959,960,958,958,957,958,960,958,960,959,959,958,960,960,959,957,956,958,960,958,953,952,952,953,954,953,954,951,952,950,953,951,951,951,947,949,950,952,951,949,946,947,948,949,947,945,946,945,949,946,947,944,946,948,951,951,949,949,947,947,951,951,949,944,946,948,950,949,947,945,946,946,947,946,947,947,946,946,951,953,951,950,949,949,951,950,953,955,958,960,964,964,960,959,962,963,966,969,969,966,967,969,968,967,964,964,965,969,969,966,964,965,966,971,968,962,957,953,948,945,948,944,942,941,941,943,947,944,943,939,942,945,949,947,945,944,944,946,946,946,944,942,942,942,939,939,940,936,935,933,928,923,918,911,900,898,907,926,950,974,1008,1060,1115,1154,1177,1171,1132,1063,997,947,919,911,918,926,931,933,932,930,931,932,935,937,936,936,934,933,935,933,932,933,932,935,937,936,934,933,934,932,935,934,934,933,930,932,933,935,932,933,933,933,932,931,931,929,931,934,935,935,933,932,933,933,936,936,934,933,932,935,937,935,933,931,927,929,933,935,935,934,933,934,937,935,934,934,932,934,936,935,932,929,929,931,932,930,928,924,924,927,927,927,926,925,923,926,924,923,923,922,921,923,924,925,923,924,924,928,933,939,939,941,943,945,948,947,948,947,945,947,950,953,953,950,950,953,955,956,955,956,955,956,957,954,954,954,953,956,957,955,957,957,957,959,961,960,956,954,954,954,957,954,951,951,950,950,953,953,952,950,950,950,953,953,953,948,950,951,954,954,951,952,949,951,952,952,948,946,947,948,949,949,947,945,943,
947,951,948,947,949,949,950,952,952,950,948,948,950,953,951,950,948,948,949,951,950,947,944,946,947,951,951,947,948,947,947,951,952,953,954,954,960,962,964,963,963,963,966,969,967,969,970,970,971,973,971,967,967,964,965,968,969,966,963,962,968,975,974,969,964,959,957,955,952,952,950,947,946,947,947,947,946,944,943,945,947,945,944,942,942,946,945,945,944,944,946,950,950,948,948,945,947,948,944,939,932,925,921,920,912,905,911,931,962,991,1030,1083,1134,1175,1196,1187,1139,1065,988,936,919,920,929,936,938,936,938,941,943,940,939,937,939,941,941,940,940,939,941,944,942,944,940,941,941,944,942,942,939,937,940,942,943,940,937,939,941,940,941,942,938,938,940,943,944,942,941,941,943,944,944,942,943,941,944,944,941,940,943,940,941,942,942,940,939,941,941,942,943,942,938,939,942,943,943,942,940,939,939,941,939,938,937,935,936,934,934,932,931,931,932,933,933,934,932,932,935,938,938,935,937,939,944,947,949,949,948,950,954,958,959,959,960,961,961,965,964,965,962,964,966,968,967,967,966,964,967,968,967,968,965,963,966,969,966,965,964,961,963,966,964,963,962,962,962,964,965,963,960,961,962,964,966,964,960,960,964,965,965,962,960,961,963,962,960,959,962,959,962,962,962,959,958,958,960,964,962,964,961,959,962,965,964,962,960,958,961,964,962,959,958,956,959,960,960,958,957,958,957,961,961,960,960,958,959,959,959,960,957,958,962,965,965,962,962,962,966,967,970,972,974,972,971,974,975,974,975,975,979,985,984,983,980,979,980,981,977,980,978,977,978,978,979,980,985,988,987,982,976,973,971,967,964,964,964,961,957,956,956,961,959,958,956,954,958,960,957,958,956,955,957,956,958,956,954,954,953,954,953,945,938,929,927,925,914,910,915,936,964,997,1032,1081,1137,1185,1213,1214,1172,1092,1009,950,928,931,943,950,951,948,949,952,953,950,949,949,951,952,952,949,946,946,947,949,948,950,947,948,948,950,950,947,945,944,947,951,949,947,947,946,947,949,948,948,946,947,948,949,946,946,948,948,951,954,951,951,951,949,951,950,949,949,948,947,948,951,952,952,950,949,951,953,953,951,951,951,950,953,951,952
,951,950,952,955,956,952,951,949,951,952,951,950,946,946,947,951,951,950,948,948,949,952,953,953,950,949,953,957,958,960,959,960,963,965,966,966,964,965,968,969,970,971,969,968,970,972,970,971,968,966,968,971,971,971,970,967,971,973,973,970,970,968,971,972,971,969,967,966,968,968,967,966,965,964,965,967,967,965,964,963,963,964,966,966,961,962,966,968,968,966,963,965,965,966,963,963,960,962,961,964,962,962,959,960,960,963,961,962,959,957,963,962,964,962,962,962,962,965,967,965,963,962,962,964,967,965,963,963,964,965,965,963,962,960,961,964,965,964,965,964,968,972,974,976,975,976,978,980,982,985,983,985,987,988,989,987,983,981,983,983,984,983,979,978,979,985,990,989,981,975,974,976,972,968,963,961,961,962,963,962,960,957,960,963,962,962,958,956,958,962,959,957,956,955,957,959,959,956,956,955,951,947,940,935,930,922,917,925,945,969,994,1030,1080,1132,1174,1202,1218,1228,1223,1198,1157,1101,1042,988,950,931,929,933,936,945,950,952,954,953,953,952,953,953,954,954,951,951,952,953,953,950,949,948,950,950,952,951,947,946,951,952,952,953,951,949,951,952,952,952,950,950,952,954,953,952,951,948,952,954,952,954,948,947,950,953,953,953,949,950,955,955,954,953,951,953,956,958,956,953,951,952,957,956,954,951,950,950,950,951,952,953,949,948,950,952,951,948,947,947,946,948,947,946,943,945,948,952,954,952,952,952,956,959,960,962,960,957,961,963,961,963,961,962,968,966,967,965,964,967,967,966,966,965,964,961,964,966,968,965,963,964,964,966,964,962,961,960,962,960,961,959,958,957,958,959,962,960,958,956,955,957,958,957,955,957,958,959,959,957,957,957,956,958,957,955,954,952,953,955,957,956,953,953,955,957,956,954,953,953,953,955,955,954,953,953,955,954,957,957,953,953,954,954,956,953,949,951,953,955,952,954,953,955,957,961,965,965,963,963,966,968,971,971,971,971,976,978,977,974,974,973,973,972,971,970,970,971,972,969,970,972,974,972,969,966,965,960,956,950,952,950,947,946,944,947,947,948,950,948,947,948,949,949,948,945,944,945,948,950,950,949,945,944,947,947,947,944,938,935,931,929,922
,909,903,911,934,963,996,1036,1084,1134,1172,1186,1167,1108,1031,964,931,920,926,934,939,935,937,937,936,938,939,938,942,941,941,940,935,933,937,939,940,938,937,937,937,939,938,938,935,934,935,938,937,938,937,938,939,940,941,941,936,935,937,939,938,936,931,932,934,935,934,933,935,934,936,938,941,939,939,935,938,938,935,935,933,935,934,936,937,936,934,932,934,936,936,934,933,930,930,929,927,929,929,927,928,929,927,926,923,922,921,923,928,925,927,927,932,936,937,940,941,940,944,948,952,952,950,950,953,956,958,959,957,954,955,957,957,957,953,954,954,956,957,956,954,953,955,958,957,956,954,954,955,956,954,953,952,949,950,952,953,950,950,948,948,951,949,948,946,946,946,950,950,949,945,948,949,953,952,950,948,947,948,949,948,948,945,946,947,949,948,948,947,945,946,945,950,948,949,948,950,952,949,947,947,946,946,949,949,949,947,945,944,948,949,949,946,947,951,957,957,961,959,960,963,963,965,966,966,966,967,969,967,965,961,963,964,967,967,966,963,962,963,963,968,967,966,961,957,955,955,949,949,948,947,946,947,946,946,944,940,943,943,943,941,940,940,940,944,941,940,939,941,943,943,942,939,934,928,922,919,914,902,895,898,911,927,945,962,978,1005,1038,1084,1133,1169,1187,1186,1150,1078,996,943,921,917,927,937,938,938,937,940,939,941,940,936,935,938,937,939,938,936,935,938,938,936,936,935,933,937,937,939,939,937,934,938,940,940,939,935,936,937,939,938,937,937,938,942,940,941,940,936,938,940,940,941,938,936,937,939,939,939,942,939,938,939,943,941,941,940,938,940,940,939,938,937,935,935,936,936,934,932,931,933,934,935,931,931,930,928,932,934,932,930,929,933,936,935,937,934,935,943,945,947,948,949,949,952,955,956,959,958,958,959,962,962,963,960,960,961,964,962,960,958,958,960,960,961,960,959,958,958,959,959,958,954,956,957,957,958,958,957,955,957,960,960,957,957,953,956,957,955,954,952,952,951,953,956,953,949,951,951,953,952,954,952,949,951,952,951,952,951,953,953,955,956,954,952,951,953,955,951,952,950,951,949,952,950,951,949,948,950,951,951,952,950,950,950,950,952,952,950,951,95
4,955,956,957,956,961,964,967,967,966,964,964,967,969,972,972,971,970,971,973,972,971,968,965,969,968,970,966,967,969,974,979,973,967,963,960,956,956,951,949,944,944,946,946,948,947,945,944,943,945,946,947,943,942,944,944,945,945,943,944,945,946,943,940,937,933,927,924,923,920,917,913,907,908,918,941,967,1000,1040,1088,1139,1175,1185,1168,1118,1048,982,939,927,929,937,942,943,937,939,938,940,941,939,937,935,935,935,936,939,938,936,937,937,936,936,936,934,932,936,939,939,939,938,936,937,939,940,936,937,936,935,937,937,936,934,933,936,937,938,937,937,936,936,936,935,937,937,935,936,938,939,938,936,935,937,938,940,938,937,933,936,936,936,936,935,935,937,937,939,939,936,936,934,937,936,933,933,931,934,934,935,933,933,933,933,937,937,937,938,939,942,947,949,948,948,948,953,953,953,953,951,956,958,958,957,954,955,956,956,958,958,957,955,954,955,956,956,957,953,953,955,956,954,953,953,953,952,953,952,951,950,949,951,951,951,950,947,946,946,950,950,949,947,945,945,951,950,946,946,945,947,946,946,948,946,943,946,945,944,944,941,942,943,946,947,945,942,943,948,949,947,949,948,947,950,950,950,950,948,948,947,949,948,948,947,946,951,951,953,949,952,953,957,961,963,964,962,961,966,970,970,970,966,967,969,971,970,967,964,962,965,967,967,965,962,961,967,972,971,964,955,954,954,955,950,945,946,944,946,945,944,944,941,938,939,943,940,938,939,940,942,943,944,943,939,939,941,941,940,938,929,923,917,917,907,896,894,907,928,946,956,971,989,1016,1054,1099,1136,1165,1178,1169,1122,1052,978,928,911,916,928,934,936,934,934,934,935,938,938,934,932,931,932,933,934,934,933,932,934,935,934,933,931,930,933,935,933,933,933,933,937,937,939,935,936,934,934,933,934,933,931,932,932,935,935,932,930,931,933,937,936,934,933,931,931,931,934,935,935,935,935,938,938,937,932,934,936,939,936,933,932,929,930,933,933,930,927,927,930,930,930,928,926,926,929,932,931,929,927,927,929,928,930,929,928,928,935,936,941,941,942,942,946,948,948,947,946,948,951,952,953,951,951,951,954,954,955,953,949,947,949,953,953,954,
953,950,953,953,953,949,948,948,950,952,952,950,945,945,948,951,953,949,948,949,950,950,950,945,943,943,945,944,944,942,943,942,942,944,944,942,940,941,942,945,946,945,943,943,946,946,945,943,943,941,941,940,943,943,943,938,942,942,943,942,940,940,941,944,943,943,940,943,941,944,946,945,945,945,946,948,951,955,954,955,957,956,956,955,955,957,961,966,969,969,967,964,965,964,962,963,963,962,962,960,965,965,965,962,958,957,956,951,949,944,943,944,941,941,939,939,938,943,944,944,940,939,938,941,941,938,937,938,939,940,939,939,938,939,939,939,938,935,929,919,916,917,910,903,900,913,940,967,998,1043,1095,1144,1179,1190,1161,1096,1018,955,925,917,918,926,929,928,931,930,931,933,931,928,928,934,933,932,930,929,932,934,935,931,930,931,935,935,935,934,933,931,934,934,935,933,931,931,931,935,935,934,931,930,931,933,933,932,933,932,932,935,935,935,934,932,936,937,938,936,934,934,936,937,938,936,933,932,937,936,937,936,935,933,932,936,935,933,932,932,934,933,934,932,931,929,931,930,931,929,924,921,925,926,925,923,923,922,925,926,927,927,925,926,928,932,934,934,935,937,940,941,944,944,946,948,950,952,955,954,955,955,956,959,960,957,958,958,959,962,963,962,958,960,961,963,961,962,957,957,959,959,959,957,953,955,959,959,958,958,956,954,958,959,957,957,958,955,956,958,959,956,956,955,958,960,958,960,958,957,953,957,957,956,954,953,954,955,957,954,954,953,952,954,956,953,951,950,952,956,957,955,955,953,954,957,957,958,956,956,958,960,959,957,954,955,955,956,954,957,956,953,956,958,958,955,955,955,958,963,965,967,969,970,971,975,975,974,972,972,973,977,978,977,978,978,979,981,981,977,974,973,973,977,976,976,973,971,976,983,988,982,976,971,969,967,964,959,956,956,954,956,955,955,954,954,956,958,958,958,955,954,957,957,961,957,956,955,956,958,959,956,957,955,956,958,957,953,947,939,932,931,926,919,910,910,927,948,979,1011,1046,1096,1152,1192,1216,1218,1182,1112,1029,967,939,932,937,945,951,953,950,950,950,949,948,949,951,949,946,947,948,948,952,950,948,946,950,952,952,949,947,946,947,94
9,951,948,946,948,949,951,951,949,948,947,950,951,953,952,949,946,949,952,953,949,948,950,952,952,952,951,949,950,951,952,951,951,949,950,949,951,951,951,950,946,949,951,950,950,949,950,951,951,952,952,948,949,953,954,954,951,949,949,950,951,950,947,946,944,946,948,949,948,948,946,949,952,951,950,954,953,957,960,962,961,963,964,966,967,971,971,967,970,973,976,978,975,975,973,975,978,976,975,976,972,976,976,975,974,975,973,975,975,977,974,973,971,973,975,975,974,972,972,974,974,975,970,971,969,969,971,970,968,969,968,967,971,970,969,967,967,969,969,970,969,966,965,968,970,969,967,964,963,966,966,966,965,966,963,963,966,966,964,960,963,965,968,967,964,966,966,965,968,970,969,966,964,966,968,969,967,964,964,966,967,969,967,966,965,968,968,970,970,974,975,979,981,983,983,981,982,985,988,989,988,987,986,988,991,992,988,988,984,985,987,986,986,987,984,985,985,982,984,986,988,993,988,982,977,975,973,972,970,970,966,963,962,964,969,966,966,965,964,961,963,964,962,964,962,964,963,966,964,963,961,964,963,959,952,941,934,933,926,918,923,938,963,988,1012,1040,1081,1121,1162,1197,1215,1223,1200,1141,1051,982,944,939,943,952,957,957,956,957,956,954,953,954,954,954,953,950,950,953,954,953,956,954,951,953,955,955,955,954,954,956,958,959,958,955,954,957,959,958,959,956,956,957,958,958,958,955,956,957,955,957,957,954,955,957,957,957,956,955,955,955,957,956,957,953,953,955,957,957,956,954,954,955,958,957,954,954,952,954,958,956,956,955,954,957,955,957,955,953,953,957,956,956,953,952,952,954,956,958,956,956,957,963,964,965,964,966,966,970,973,971,971,972,973,975,976,976,974,974,974,977,977,978,974,974,974,975,978,977,975,973,973,974,975,975,974,973,972,972,974,974,974,971,970,969,971,972,972,970,968,970,972,973,970,970,968,970,971,970,972,969,969,971,970,970,970,969,969,970,972,970,968,967,966,966,967,970,967,963,963,966,966,968,967,967,966,965,969,969,967,966,966,968,971,972,970,970,968,970,971,971,970,967,968,969,970,969,971,969,969,970,972,971,971,972,973,975,981,984,982,980,982,985
,989,989,992,989,991,992,993,993,990,987,986,989,990,990,989,985,983,987,993,997,992,985,980,981,981,975,970,966,962,965,967,967,967,965,963,965,965,965,964,963,963,965,966,965,964,961,960,962,963,964,964,963,961,957,950,943,938,933,922,917,921,936,960,983,1014,1065,1122,1167,1190,1187,1148,1078,1004,953,933,933,944]
- expecting = [271, 580, 884, 1181, 1469, 1770, 2055, 2339, 2634, 2939, 3255, 3551, 3831, 4120, 4412, 4700, 5000, 5299, 5596, 5889, 6172, 6454, 6744, 7047, 7347, 7646, 7936, 8216, 8503, 8785, 9070, 9377, 9682]
- frequency = 360.0
- adcgain = 200.0
- adczero = 1024
- peaks = wfdb.processing.gqrs_detect(x, frequency, adcgain, adczero, threshold=1.0)
- print(peaks)
- print(expecting)
- assert numpy.array_equal(peaks, expecting)
-
- def test_7(self):
- sig, fields = wfdb.srdsamp('sampledata/100', channels = [0, 1])
- ann = wfdb.rdann('sampledata/100', 'atr')
+ def test_correct_peaks(self):
+ sig, fields = wfdb.rdsamp('sample-data/100')
+ ann = wfdb.rdann('sample-data/100', 'atr')
fs = fields['fs']
min_bpm = 10
max_bpm = 350
min_gap = fs*60/min_bpm
- max_gap = fs*60/max_bpm
+ max_gap = fs * 60 / max_bpm
- y_idxs = wfdb.processing.correct_peaks(sig[:,0], ann.sample, min_gap, max_gap, smooth_window=150)
+ y_idxs = processing.correct_peaks(sig=sig[:,0], peak_inds=ann.sample,
+ search_radius=int(max_gap),
+ smooth_window_size=150)
yz = numpy.zeros(sig.shape[0])
yz[y_idxs] = 1
yz = numpy.where(yz[:10000]==1)[0]
- assert numpy.array_equal(yz, [77, 370, 663, 947, 1231, 1515, 1809, 2045, 2403, 2706, 2998, 3283, 3560, 3863, 4171, 4466, 4765, 5061, 5347, 5634, 5919, 6215, 6527, 6824, 7106, 7393, 7670, 7953, 8246, 8539, 8837, 9142, 9432, 9710, 9998])
+ expected_peaks = [77, 370, 663, 947, 1231, 1515, 1809, 2045, 2403,
+ 2706, 2998, 3283, 3560, 3863, 4171, 4466, 4765, 5061,
+ 5347, 5634, 5919, 6215, 6527, 6824, 7106, 7393, 7670,
+ 7953, 8246, 8539, 8837, 9142, 9432, 9710, 9998]
+
+ assert numpy.array_equal(yz, expected_peaks)
+
+class test_qrs():
+ """
+ Testing qrs detectors
+ """
+ def test_xqrs(self):
+ """
+ Run xqrs detector on record 100 and compare to reference annotations
+ """
+ sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
+ ann_ref = wfdb.rdann('sample-data/100','atr')
+
+ xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
+ xqrs.detect()
+
+ comparitor = processing.compare_annotations(ann_ref.sample[1:],
+ xqrs.qrs_inds,
+ int(0.1 * fields['fs']))
+
+ assert comparitor.specificity > 0.99
+ assert comparitor.positive_predictivity > 0.99
+ assert comparitor.false_positive_rate < 0.01
diff --git a/tests/test_records.py b/tests/test_records.py
deleted file mode 100644
index ddc1803a..00000000
--- a/tests/test_records.py
+++ /dev/null
@@ -1,374 +0,0 @@
-import wfdb
-import numpy as np
-import os
-
-# Target files created using the original WFDB Software Package version 10.5.24
-class test_rdsamp():
-
- # ---------------------------- 1. Basic Tests ---------------------------- #
-
- # Format 16/Entire signal/Digital
- # Target file created with: rdsamp -r sampledata/test01_00s | cut -f 2- >
- # target1a
- def test_1a(self):
- record = wfdb.rdsamp('sampledata/test01_00s', physical=False)
- sig = record.d_signals
- targetsig = np.genfromtxt('tests/targetoutputdata/target1a')
-
- # Compare data streaming from physiobank
- pbrecord = wfdb.rdsamp('test01_00s', physical=False, pbdir = 'macecgdb')
-
- # Test file writing
- record2 = wfdb.rdsamp('sampledata/test01_00s', physical=False)
- record2.signame = ['ECG_1', 'ECG_2', 'ECG_3', 'ECG_4']
- record2.wrsamp()
- recordwrite = wfdb.rdsamp('test01_00s', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(pbrecord)
- assert record2.__eq__(recordwrite)
-
- # Format 16 with byte offset/Selected Duration/Selected Channels/Physical
- # Target file created with: rdsamp -r sampledata/a103l -f 50 -t 160 -s 2 0
- # -P | cut -f 2- > target1b
- def test_1b(self):
- sig, fields = wfdb.srdsamp('sampledata/a103l',
- sampfrom=12500, sampto=40000, channels=[2, 0])
- siground = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target1b')
-
- # Compare data streaming from physiobank
- pbsig, pbfields = wfdb.srdsamp('a103l', pbdir = 'challenge/2015/training',
- sampfrom=12500, sampto=40000, channels=[2, 0])
- assert np.array_equal(siground, targetsig)
- assert np.array_equal(sig, pbsig) and fields == pbfields
-
- # Format 16 with byte offset/Selected Duration/Selected Channels/Digital
- # Target file created with: rdsamp -r sampledata/a103l -f 80 -s 0 1 | cut
- # -f 2- > target1c
- def test_1c(self):
- record = wfdb.rdsamp('sampledata/a103l',
- sampfrom=20000, channels=[0, 1], physical=False)
- sig = record.d_signals
- targetsig = np.genfromtxt('tests/targetoutputdata/target1c')
-
- # Compare data streaming from physiobank
- pbrecord = wfdb.rdsamp('a103l', pbdir = 'challenge/2015/training',
- sampfrom=20000, channels=[0, 1], physical=False)
-
- # Test file writing
- record.wrsamp()
- recordwrite = wfdb.rdsamp('a103l', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(pbrecord)
- assert record.__eq__(recordwrite)
-
- # Format 80/Selected Duration/Selected Channels/Physical
- # Target file created with: rdsamp -r sampledata/3000003_0003 -f 1 -t 8 -s
- # 1 -P | cut -f 2- > target1d
- def test_1d(self):
- sig, fields = wfdb.srdsamp('sampledata/3000003_0003',
- sampfrom=125, sampto=1000, channels=[1])
- siground = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target1d')
- targetsig = targetsig.reshape(len(targetsig), 1)
-
- # Compare data streaming from physiobank
- pbsig, pbfields = wfdb.srdsamp('3000003_0003', pbdir = 'mimic2wdb/30/3000003/',
- sampfrom=125, sampto=1000, channels=[1])
-
- assert np.array_equal(siground, targetsig)
- assert np.array_equal(sig, pbsig) and fields == pbfields
-
-
- # ---------------------------- 2. Special format tests ---------------------------- #
-
- # Format 212/Entire signal/Physical
- # Target file created with: rdsamp -r sampledata/100 -P | cut -f 2- >
- # target2a
- def test_2a(self):
- sig, fields = wfdb.srdsamp('sampledata/100')
- siground = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target2a')
-
- # Compare data streaming from physiobank
- pbsig, pbfields = wfdb.srdsamp('100', pbdir = 'mitdb')
- # This comment line was manually added and is not present in the original physiobank record
- del(fields['comments'][0])
-
- assert np.array_equal(siground, targetsig)
- assert np.array_equal(sig, pbsig) and fields == pbfields
-
- # Format 212/Selected Duration/Selected Channel/Digital.
- # Target file created with: rdsamp -r sampledata/100 -f 0.002 -t 30 -s 1 |
- # cut -f 2- > target2b
- def test_2b(self):
- record = wfdb.rdsamp('sampledata/100', sampfrom=1,
- sampto=10800, channels=[1], physical=False)
- sig = record.d_signals
- targetsig = np.genfromtxt('tests/targetoutputdata/target2b')
- targetsig = targetsig.reshape(len(targetsig), 1)
-
- # Compare data streaming from physiobank
- pbrecord = wfdb.rdsamp('100', sampfrom=1, sampto=10800, channels=[1], physical=False, pbdir = 'mitdb')
- # This comment line was manually added and is not present in the original physiobank record
- del(record.comments[0])
-
- # Test file writing
- record.wrsamp()
- recordwrite = wfdb.rdsamp('100', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(pbrecord)
- assert record.__eq__(recordwrite)
-
-
- # Format 212/Entire signal/Physical for odd sampled record
- # Target file created with: rdsamp -r sampledata/100_3chan -P | cut -f 2- >
- # target2c
- def test_2c(self):
- record = wfdb.rdsamp('sampledata/100_3chan')
- siground = np.round(record.p_signals, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target2c')
-
- # Test file writing
- record.d_signals = record.adc()
- record.wrsamp()
- recordwrite = wfdb.rdsamp('100_3chan')
- record.d_signals = None
-
- assert np.array_equal(siground, targetsig)
- assert record.__eq__(recordwrite)
-
-
- # Format 310/Selected Duration/Digital
- # Target file created with: rdsamp -r sampledata/3000003_0003 -f 0 -t 8.21 | cut -f 2- | wrsamp -o 310derive -O 310
- # rdsamp -r 310derive -f 0.007 | cut -f 2- > target2d
- def test_2d(self):
- record = wfdb.rdsamp('sampledata/310derive', sampfrom=2, physical=False)
- sig = record.d_signals
- targetsig = np.genfromtxt('tests/targetoutputdata/target2d')
- assert np.array_equal(sig, targetsig)
-
- # Format 311/Selected Duration/Physical
- # Target file created with: rdsamp -r sampledata/3000003_0003 -f 0 -t 8.21 -s 1 | cut -f 2- | wrsamp -o 311derive -O 311
- # rdsamp -r 311derive -f 0.005 -t 3.91 -P | cut -f 2- > target2e
- def test_2e(self):
- sig, fields = wfdb.srdsamp('sampledata/311derive', sampfrom=1, sampto=978)
- sig = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target2e')
- targetsig = targetsig.reshape([977, 1])
- assert np.array_equal(sig, targetsig)
-
-
- # ---------------------------- 3. Multi-dat file tests ---------------------------- #
-
- # Multi-dat/Entire signal/Digital
- # Target file created with: rdsamp -r sampledata/s0010_re | cut -f 2- >
- # target3a
- def test_3a(self):
- record= wfdb.rdsamp('sampledata/s0010_re', physical=False)
- sig = record.d_signals
- targetsig = np.genfromtxt('tests/targetoutputdata/target3a')
-
- # Compare data streaming from physiobank
- pbrecord= wfdb.rdsamp('s0010_re', physical=False, pbdir = 'ptbdb/patient001')
-
- # Test file writing
- record.wrsamp()
- recordwrite = wfdb.rdsamp('s0010_re', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(pbrecord)
- assert record.__eq__(recordwrite)
-
- # Multi-dat/Selected Duration/Selected Channels/Physical
- # Target file created with: rdsamp -r sampledata/s0010_re -f 5 -t 38 -P -s
- # 13 0 4 8 3 | cut -f 2- > target3b
- def test_3b(self):
- sig, fields = wfdb.srdsamp('sampledata/s0010_re', sampfrom=5000,
- sampto=38000, channels=[13, 0, 4, 8, 3])
- siground = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target3b')
-
- # Compare data streaming from physiobank
- pbsig, pbfields = wfdb.srdsamp('s0010_re', sampfrom=5000, pbdir = 'ptbdb/patient001',
- sampto=38000, channels=[13, 0, 4, 8, 3])
-
- assert np.array_equal(siground, targetsig)
- assert np.array_equal(sig, pbsig) and fields == pbfields
-
-
- # ------------------- 4. Skew and multiple samples/frame tests ------------------- #
-
- # Format 16 multi-samples per frame and skew digital
- # Target file created with: rdsamp -r sampledata/test01_00s_skewframe | cut
- # -f 2- > target4a
- def test_4a(self):
- record = wfdb.rdsamp('sampledata/test01_00s_skewframe', physical=False)
- sig = record.d_signals
- # The WFDB library rdsamp does not return the final N samples for all
- # channels due to the skew. The WFDB python rdsamp does return the final
- # N samples, filling in NANs for end of skewed channels only.
- sig = sig[:-3, :]
-
- targetsig = np.genfromtxt('tests/targetoutputdata/target4a')
-
- # Test file writing. Multiple samples per frame and skew.
- # Have to read all the samples in the record, ignoring skew
- recordnoskew = wfdb.rdsamp('sampledata/test01_00s_skewframe', physical=False,
- smoothframes=False, ignoreskew=True)
- recordnoskew.wrsamp(expanded=True)
- # Read the written record
- recordwrite = wfdb.rdsamp('test01_00s_skewframe', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(recordwrite)
-
- # Format 12 multi-samples per frame and skew/Entire Signal/Digital
- # Target file created with: rdsamp -r sampledata/03700181 | cut -f 2- >
- # target4b
- def test_4b(self):
- record = wfdb.rdsamp('sampledata/03700181', physical=False)
- sig = record.d_signals
- # The WFDB library rdsamp does not return the final N samples for all
- # channels due to the skew.
- sig = sig[:-4, :]
- # The WFDB python rdsamp does return the final N samples, filling in
- # NANs for end of skewed channels only.
- targetsig = np.genfromtxt('tests/targetoutputdata/target4b')
-
- # Compare data streaming from physiobank
- pbrecord = wfdb.rdsamp('03700181', physical=False, pbdir = 'mimicdb/037')
-
- # Test file writing. Multiple samples per frame and skew.
- # Have to read all the samples in the record, ignoring skew
- recordnoskew = wfdb.rdsamp('sampledata/03700181', physical=False,
- smoothframes=False, ignoreskew=True)
- recordnoskew.wrsamp(expanded=True)
- # Read the written record
- recordwrite = wfdb.rdsamp('03700181', physical=False)
-
- assert np.array_equal(sig, targetsig)
- assert record.__eq__(pbrecord)
- assert record.__eq__(recordwrite)
-
- # Format 12 multi-samples per frame and skew/Selected Duration/Selected Channels/Physical
- # Target file created with: rdsamp -r sampledata/03700181 -f 8 -t 128 -s 0
- # 2 -P | cut -f 2- > target4c
- def test_4c(self):
- sig, fields = wfdb.srdsamp('sampledata/03700181',
- channels=[0, 2], sampfrom=1000, sampto=16000)
- siground = np.round(sig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target4c')
-
- # Compare data streaming from physiobank
- pbsig, pbfields = wfdb.srdsamp('03700181', pbdir = 'mimicdb/037',
- channels=[0, 2], sampfrom=1000, sampto=16000)
-
- # Test file writing. Multiple samples per frame and skew.
- # Have to read all the samples in the record, ignoring skew
- recordnoskew = wfdb.rdsamp('sampledata/03700181', physical=False,
- smoothframes=False, ignoreskew=True)
- recordnoskew.wrsamp(expanded=True)
- # Read the written record
- writesig, writefields = wfdb.srdsamp('03700181', channels=[0, 2],
- sampfrom=1000, sampto=16000)
-
- assert np.array_equal(siground, targetsig)
- assert np.array_equal(sig, pbsig) and fields == pbfields
- assert np.array_equal(sig, writesig) and fields == writefields
-
-
- # Format 16 multi-samples per frame and skew, read expanded signals
- # Target file created with: rdsamp -r sampledata/test01_00s_skewframe -P -H | cut
- # -f 2- > target4d
- def test_4d(self):
- record = wfdb.rdsamp('sampledata/test01_00s_skewframe', smoothframes=False)
-
- # Upsample the channels with lower samples/frame
- expandsig = np.zeros((7994, 3))
- expandsig[:,0] = np.repeat(record.e_p_signals[0][:-3],2)
- expandsig[:,1] = record.e_p_signals[1][:-6]
- expandsig[:,2] = np.repeat(record.e_p_signals[2][:-3],2)
-
- siground = np.round(expandsig, decimals=8)
- targetsig = np.genfromtxt('tests/targetoutputdata/target4d')
-
- assert np.array_equal(siground, targetsig)
-
-
- # ------------------------ 5. Multi-segment Tests ------------------------ #
-
- # Multi-segment variable layout/Selected duration. All samples contained in one segment.
- # Target file created with:
- # rdsamp -r sampledata/multisegment/s00001/s00001-2896-10-10-00-31 -f s14428365 -t s14428375 -P | cut -f 2- > target5a
- def test_5a(self):
- record=wfdb.rdsamp('sampledata/multisegment/s00001/s00001-2896-10-10-00-31',
- sampfrom=14428365, sampto=14428375)
- siground=np.round(record.p_signals, decimals=8)
- targetsig=np.genfromtxt('tests/targetoutputdata/target5a')
-
- np.testing.assert_equal(siground, targetsig)
-
- # Multi-segment variable layout/Selected duration. Samples read from >1 segment
- # Target file created with:
- # rdsamp -r sampledata/multisegment/s00001/s00001-2896-10-10-00-31 -f s14428364 -t s14428375 -P | cut -f 2- > target5b
- def test_5b(self):
- record=wfdb.rdsamp('sampledata/multisegment/s00001/s00001-2896-10-10-00-31',
- sampfrom=14428364, sampto=14428375)
- siground=np.round(record.p_signals, decimals=8)
- targetsig=np.genfromtxt('tests/targetoutputdata/target5b')
-
- np.testing.assert_equal(siground, targetsig)
-
- # Multi-segment fixed layout entire signal.
- # Target file created with: rdsamp -r sampledata/multisegment/fixed1/v102s -P | cut -f 2- > target5c
- def test_5c(self):
- record=wfdb.rdsamp('sampledata/multisegment/fixed1/v102s')
- siground=np.round(record.p_signals, decimals=8)
- targetsig=np.genfromtxt('tests/targetoutputdata/target5c')
-
- np.testing.assert_equal(siground, targetsig)
-
- # Multi-segment fixed layout/selected duration. All samples contained in one segment
- # Target file created with: rdsamp -r sampledata/multisegment/fixed1/v102s -t s75000 -P | cut -f 2- > target5d
- def test_5d(self):
- record=wfdb.rdsamp('sampledata/multisegment/fixed1/v102s', sampto = 75000)
- siground=np.round(record.p_signals, decimals=8)
- targetsig=np.genfromtxt('tests/targetoutputdata/target5d')
-
- np.testing.assert_equal(siground, targetsig)
-
- # Test 11 - Multi-segment variable layout/Entire signal/Physical
- # Target file created with: rdsamp -r sampledata/matched/s25047/s25047-2704-05-04-10-44 -P | cut -f 2- > target11
- # def test_11(self):
- #sig, fields=rdsamp('sampledata/matched/s25047/s25047-2704-05-04-10-44')
- #sig=np.round(sig, decimals=8)
- # targetsig=np.genfromtxt('tests/targetoutputdata/target11')
- #assert np.array_equal(sig, targetsig)
-
- # Test 12 - Multi-segment variable layout/Selected duration/Selected Channels/Physical
- # Target file created with: rdsamp -r sampledata/matched/s00001/s00001-2896-10-10-00-31 -f s -t 4000 -s 3 0 -P | cut -f 2- > target12
- #def test_12(self):
- # record=rdsamp('sampledata/matched/s00001/s00001-2896-10-10-00-31', sampfrom=8750, sampto=500000)
- # siground=np.round(record.p_signals, decimals=8)
- # targetsig=np.genfromtxt('tests/targetoutputdata/target12')
- #
- # assert np.array_equal(sig, targetsig)
-
-
- # Cleanup written files
- @classmethod
- def tearDownClass(self):
-
- writefiles = ['03700181.dat','03700181.hea','100.atr','100.dat',
- '100.hea','1003.atr','100_3chan.dat','100_3chan.hea',
- '12726.anI','a103l.hea','a103l.mat','s0010_re.dat',
- 's0010_re.hea','s0010_re.xyz','test01_00s.dat',
- 'test01_00s.hea','test01_00s_skewframe.hea']
-
- for file in writefiles:
- if os.path.isfile(file):
- os.remove(file)
diff --git a/wfdb/__init__.py b/wfdb/__init__.py
index 639beedf..b9a3fc13 100644
--- a/wfdb/__init__.py
+++ b/wfdb/__init__.py
@@ -1,8 +1,8 @@
-from .readwrite.records import Record, MultiRecord, rdheader, rdsamp, srdsamp, wrsamp, dldatabase, dldatabasefiles
-from .readwrite._signals import estres, wrdatfile
-from .readwrite._headers import sig_classes
-from .readwrite.annotations import Annotation, rdann, wrann, show_ann_labels, ann_classes, ann_labels, ann_label_table
-from .readwrite.downloads import getdblist
-from .plot.plots import plotrec, plotann, plot_records
-from . import processing
+from .io.record import (Record, MultiRecord, rdheader, rdrecord, rdsamp,
+ wrsamp, dl_database)
+from .io.annotation import (Annotation, rdann, wrann, show_ann_labels,
+ show_ann_classes)
+from .io.download import get_dbs, get_record_list
+from .plot.plot import plot_items, plot_wfdb, plot_all_records
+
from .version import __version__
diff --git a/wfdb/io/__init__.py b/wfdb/io/__init__.py
new file mode 100644
index 00000000..c1c9bc2f
--- /dev/null
+++ b/wfdb/io/__init__.py
@@ -0,0 +1,6 @@
+from .record import (Record, MultiRecord, rdheader, rdrecord, rdsamp, wrsamp,
+ dl_database, sig_classes)
+from ._signal import est_res, wr_dat_file
+from .annotation import (Annotation, rdann, wrann, show_ann_labels,
+ show_ann_classes)
+from .download import get_dbs, get_record_list, dl_files
diff --git a/wfdb/io/_header.py b/wfdb/io/_header.py
new file mode 100644
index 00000000..5e4f842a
--- /dev/null
+++ b/wfdb/io/_header.py
@@ -0,0 +1,644 @@
+from calendar import monthrange
+from collections import OrderedDict
+import numpy as np
+import os
+import re
+
+from . import download
+from . import _signal
+
+
+class FieldSpecification(object):
+ """
+ Class for storing specifications for wfdb record fields
+ """
+ def __init__(self, allowed_types, delimiter, dependency, write_req,
+ read_def, write_def):
+ # Data types the field (or its elements) can be
+ self.allowed_types = allowed_types
+        # The text delimiter that precedes the field if it is a field that gets written to header files.
+ self.delimiter = delimiter
+ # The required/dependent field which must also be present
+ self.dependency = dependency
+        # Whether the field is always required for writing a header (more stringent than original WFDB library)
+ self.write_req = write_req
+ # The default value for the field when read if any
+ self.read_def = read_def
+ # The default value for the field to fill in before writing if any
+ self.write_def = write_def
+
+ # The read vs write default values are different for 2 reasons:
+ # 1. We want to force the user to be explicit with certain important
+ # fields when writing WFDB records fields, without affecting
+ # existing WFDB headers when reading.
+ # 2. Certain unimportant fields may be dependencies of other
+ # important fields. When writing, we want to fill in defaults
+ # so that the user doesn't need to. But when reading, it should
+ # be clear that the fields are missing.
+
+int_types = (int, np.int64, np.int32, np.int16, np.int8)
+float_types = int_types + (float, np.float64, np.float32)
+int_dtypes = ('int64', 'uint64', 'int32', 'uint32','int16','uint16')
+
+# Record specification fields
+rec_field_specs = OrderedDict([('record_name', FieldSpecification((str), '', None, True, None, None)),
+ ('n_seg', FieldSpecification(int_types, '/', 'record_name', True, None, None)),
+ ('n_sig', FieldSpecification(int_types, ' ', 'record_name', True, None, None)),
+ ('fs', FieldSpecification(float_types, ' ', 'n_sig', True, 250, None)),
+ ('counter_freq', FieldSpecification(float_types, '/', 'fs', False, None, None)),
+ ('base_counter', FieldSpecification(float_types, '(', 'counter_freq', False, None, None)),
+ ('sig_len', FieldSpecification(int_types, ' ', 'fs', True, None, None)),
+ ('base_time', FieldSpecification((str), ' ', 'sig_len', False, None, '00:00:00')),
+ ('base_date', FieldSpecification((str), ' ', 'base_time', False, None, None))])
+
+# Signal specification fields.
+sig_field_specs = OrderedDict([('file_name', FieldSpecification((str), '', None, True, None, None)),
+ ('fmt', FieldSpecification((str), ' ', 'file_name', True, None, None)),
+ ('samps_per_frame', FieldSpecification(int_types, 'x', 'fmt', False, 1, None)),
+ ('skew', FieldSpecification(int_types, ':', 'fmt', False, None, None)),
+ ('byte_offset', FieldSpecification(int_types, '+', 'fmt', False, None, None)),
+ ('adc_gain', FieldSpecification(float_types, ' ', 'fmt', True, 200., None)),
+ ('baseline', FieldSpecification(int_types, '(', 'adc_gain', True, 0, None)),
+ ('units', FieldSpecification((str), '/', 'adc_gain', True, 'mV', None)),
+ ('adc_res', FieldSpecification(int_types, ' ', 'adc_gain', False, None, 0)),
+ ('adc_zero', FieldSpecification(int_types, ' ', 'adc_res', False, None, 0)),
+ ('init_value', FieldSpecification(int_types, ' ', 'adc_zero', False, None, None)),
+ ('checksum', FieldSpecification(int_types, ' ', 'init_value', False, None, None)),
+ ('block_size', FieldSpecification(int_types, ' ', 'checksum', False, None, 0)),
+ ('sig_name', FieldSpecification((str), ' ', 'block_size', False, None, None))])
+
+# Segment specification fields.
+seg_field_specs = OrderedDict([('seg_name', FieldSpecification((str), '', None, True, None, None)),
+ ('seg_len', FieldSpecification(int_types, ' ', 'seg_name', True, None, None))])
+
+
+# Regexp objects for reading headers
+
+# Record Line Fields
+rx_record = re.compile(
+    ''.join(
+        [
+            "(?P<record_name>[-\w]+)/?(?P<n_seg>\d*)[ \t]+",
+            "(?P<n_sig>\d+)[ \t]*",
+            "(?P<fs>\d*\.?\d*)/*(?P<counter_freq>\d*\.?\d*)\(?(?P<base_counter>\d*\.?\d*)\)?[ \t]*",
+            "(?P<sig_len>\d*)[ \t]*",
+            "(?P<base_time>\d*:?\d{,2}:?\d{,2}\.?\d*)[ \t]*",
+            "(?P<base_date>\d{,2}/?\d{,2}/?\d{,4})"]))
+
+# Signal Line Fields
+rx_signal = re.compile(
+    ''.join(
+        [
+            "(?P<file_name>[-\w]+\.?[\w]*~?)[ \t]+(?P<fmt>\d+)x?"
+            "(?P<samps_per_frame>\d*):?(?P<skew>\d*)\+?(?P<byte_offset>\d*)[ \t]*",
+            "(?P<adc_gain>-?\d*\.?\d*e?[\+-]?\d*)\(?(?P<baseline>-?\d*)\)?/?(?P<units>[\w\^\-\?%]*)[ \t]*",
+            "(?P<adc_res>\d*)[ \t]*(?P<adc_zero>-?\d*)[ \t]*(?P<init_value>-?\d*)[ \t]*",
+            "(?P<checksum>-?\d*)[ \t]*(?P<block_size>\d*)[ \t]*(?P<sig_name>[\S]?[^\t\n\r\f\v]*)"]))
+
+# Segment Line Fields
+rx_segment = re.compile('(?P<seg_name>\w*~?)[ \t]+(?P<seg_len>\d+)')
+
+
+class BaseHeaderMixin(object):
+ """
+ Mixin class with multi-segment header methods. Inherited by Record and
+ MultiRecord classes
+ """
+
+ def get_write_subset(self, spec_fields):
+ """
+ Helper function for get_write_fields.
+
+ - spec_fields is the set of specification fields
+ For record specs, it returns a list of all fields needed.
+ For signal specs, it returns a dictionary of all fields needed,
+ with keys = field and value = list of 1 or 0 indicating channel for the field
+ """
+
+ # record specification fields
+ if spec_fields == 'record':
+ write_fields=[]
+ fieldspecs = OrderedDict(reversed(list(rec_field_specs.items())))
+ # Remove this requirement for single segs
+ if not hasattr(self, 'n_seg'):
+ del(fieldspecs['n_seg'])
+
+ for f in fieldspecs:
+ if f in write_fields:
+ continue
+ # If the field is required by default or has been defined by the user
+ if fieldspecs[f].write_req or getattr(self, f) is not None:
+ rf=f
+ # Add the field and its recursive dependencies
+ while rf is not None:
+ write_fields.append(rf)
+ rf=fieldspecs[rf].dependency
+ # Add comments if any
+ if getattr(self, 'comments') is not None:
+ write_fields.append('comments')
+
+ # signal spec field. Need to return a potentially different list for each channel.
+ elif spec_fields == 'signal':
+ # List of lists for each channel
+ write_fields=[]
+
+ allwrite_fields=[]
+ fieldspecs = OrderedDict(reversed(list(sig_field_specs.items())))
+
+ for ch in range(self.n_sig):
+ # The fields needed for this channel
+ write_fieldsch = []
+ for f in fieldspecs:
+ if f in write_fieldsch:
+ continue
+
+ fielditem = getattr(self, f)
+ # If the field is required by default or has been defined by the user
+ if fieldspecs[f].write_req or (fielditem is not None and fielditem[ch] is not None):
+ rf=f
+ # Add the field and its recursive dependencies
+ while rf is not None:
+ write_fieldsch.append(rf)
+ rf=fieldspecs[rf].dependency
+
+ write_fields.append(write_fieldsch)
+
+ # Convert the list of lists to a single dictionary.
+ # keys = field and value = list of 1 or 0 indicating channel for the field
+ dictwrite_fields = {}
+
+ # For fields present in any channel:
+ for f in set([i for wsub in write_fields for i in wsub]):
+ dictwrite_fields[f] = [0]*self.n_sig
+
+ for ch in range(self.n_sig):
+ if f in write_fields[ch]:
+ dictwrite_fields[f][ch] = 1
+
+ write_fields = dictwrite_fields
+
+ return write_fields
+
+
+class HeaderMixin(BaseHeaderMixin):
+ """
+ Mixin class with single-segment header methods. Inherited by Record class.
+ """
+
+ def set_defaults(self):
+ """
+ Set defaults for fields needed to write the header if they have defaults.
+ This is NOT called by rdheader. It is only automatically called by the gateway wrsamp for convenience.
+ It is also not called by wrhea (this may be changed in the future) since
+ it is supposed to be an explicit function.
+
+ Not responsible for initializing the
+ attributes. That is done by the constructor.
+ """
+ rfields, sfields = self.get_write_fields()
+ for f in rfields:
+ self.set_default(f)
+ for f in sfields:
+ self.set_default(f)
+
+ # Write a wfdb header file. The signals or segments fields are not used.
+ def wrheader(self, write_dir=''):
+
+ # Get all the fields used to write the header
+ recwrite_fields, sigwrite_fields = self.get_write_fields()
+
+ # Check the validity of individual fields used to write the header
+
+ # Record specification fields (and comments)
+ for f in recwrite_fields:
+ self.check_field(f)
+
+ # Signal specification fields.
+ for f in sigwrite_fields:
+ self.check_field(f, sigwrite_fields[f])
+
+ # Check the cohesion of fields used to write the header
+ self.check_field_cohesion(recwrite_fields, list(sigwrite_fields))
+
+ # Write the header file using the specified fields
+ self.wr_header_file(recwrite_fields, sigwrite_fields, write_dir)
+
+
+ # Get the list of fields used to write the header. (Does NOT include d_signal or e_d_signal.)
+ # Separate items by record and signal specification field.
+ # Returns the default required fields, the user defined fields, and their dependencies.
+ # recwrite_fields includes 'comment' if present.
+ def get_write_fields(self):
+
+ # Record specification fields
+ recwrite_fields=self.get_write_subset('record')
+
+ # Add comments if any
+ if self.comments != None:
+ recwrite_fields.append('comments')
+
+ # Determine whether there are signals. If so, get their required fields.
+ self.check_field('n_sig')
+ if self.n_sig>0:
+ sigwrite_fields=self.get_write_subset('signal')
+ else:
+ sigwrite_fields = None
+
+ return recwrite_fields, sigwrite_fields
+
+ # Set the object's attribute to its default value if it is missing
+ # and there is a default. Not responsible for initializing the
+ # attribute. That is done by the constructor.
+ def set_default(self, field):
+
+ # Record specification fields
+ if field in rec_field_specs:
+ # Return if no default to set, or if the field is already present.
+ if rec_field_specs[field].write_def is None or getattr(self, field) is not None:
+ return
+ setattr(self, field, rec_field_specs[field].write_def)
+
+ # Signal specification fields
+ # Setting entire list default, not filling in blanks in lists.
+ elif field in sig_field_specs:
+
+ # Specific dynamic case
+ if field == 'file_name' and self.file_name is None:
+ self.file_name = self.n_sig*[self.record_name+'.dat']
+ return
+
+ item = getattr(self, field)
+
+ # Return if no default to set, or if the field is already present.
+ if sig_field_specs[field].write_def is None or item is not None:
+ return
+
+ # Set more specific defaults if possible
+ if field == 'adc_res' and self.fmt is not None:
+ self.adc_res=_signal.wfdbfmtres(self.fmt)
+ return
+
+ setattr(self, field, [sig_field_specs[field].write_def]*self.n_sig)
+
+ # Check the cohesion of fields used to write the header
+ def check_field_cohesion(self, recwrite_fields, sigwrite_fields):
+
+ # If there are no signal specification fields, there is nothing to check.
+ if self.n_sig>0:
+
+ # The length of all signal specification fields must match n_sig
+ # even if some of its elements are None.
+ for f in sigwrite_fields:
+ if len(getattr(self, f)) != self.n_sig:
+ raise ValueError('The length of field: '+f+' must match field n_sig.')
+
+ # Each file_name must correspond to only one fmt, (and only one byte offset if defined).
+ datfmts = {}
+ for ch in range(self.n_sig):
+ if self.file_name[ch] not in datfmts:
+ datfmts[self.file_name[ch]] = self.fmt[ch]
+ else:
+ if datfmts[self.file_name[ch]] != self.fmt[ch]:
+ raise ValueError('Each file_name (dat file) specified must have the same fmt')
+
+ datoffsets = {}
+ if self.byte_offset is not None:
+ # At least one byte offset value exists
+ for ch in range(self.n_sig):
+ if self.byte_offset[ch] is None:
+ continue
+ if self.file_name[ch] not in datoffsets:
+ datoffsets[self.file_name[ch]] = self.byte_offset[ch]
+ else:
+ if datoffsets[self.file_name[ch]] != self.byte_offset[ch]:
+ raise ValueError('Each file_name (dat file) specified must have the same byte offset')
+
+
+
+ def wr_header_file(self, recwrite_fields, sigwrite_fields, write_dir):
+ # Write a header file using the specified fields
+ header_lines=[]
+
+ # Create record specification line
+ recordline = ''
+ # Traverse the ordered dictionary
+ for field in rec_field_specs:
+ # If the field is being used, add it with its delimiter
+ if field in recwrite_fields:
+ stringfield = str(getattr(self, field))
+ # If fs is float, check whether it as an integer
+ if field == 'fs' and isinstance(self.fs, float):
+ if round(self.fs, 8) == float(int(self.fs)):
+ stringfield = str(int(self.fs))
+ recordline = recordline + rec_field_specs[field].delimiter + stringfield
+ header_lines.append(recordline)
+
+ # Create signal specification lines (if any) one channel at a time
+ if self.n_sig>0:
+ signallines = self.n_sig*['']
+ for ch in range(self.n_sig):
+ # Traverse the ordered dictionary
+ for field in sig_field_specs:
+ # If the field is being used, add each of its elements with the delimiter to the appropriate line
+ if field in sigwrite_fields and sigwrite_fields[field][ch]:
+ signallines[ch]=signallines[ch] + sig_field_specs[field].delimiter + str(getattr(self, field)[ch])
+ # The 'baseline' field needs to be closed with ')'
+ if field== 'baseline':
+ signallines[ch]=signallines[ch] +')'
+
+ header_lines = header_lines + signallines
+
+ # Create comment lines (if any)
+ if 'comments' in recwrite_fields:
+ comment_lines = ['# '+comment for comment in self.comments]
+ header_lines = header_lines + comment_lines
+
+ lines_to_file(self.record_name+'.hea', write_dir, header_lines)
+
+
+class MultiHeaderMixin(BaseHeaderMixin):
+ """
+ Mixin class with multi-segment header methods. Inherited by MultiRecord class.
+ """
+
+ # Set defaults for fields needed to write the header if they have defaults.
+ # This is NOT called by rdheader. It is only called by the gateway wrsamp for convenience.
+ # It is also not called by wrhea (this may be changed in the future) since
+ # it is supposed to be an explicit function.
+
+ # Not responsible for initializing the
+ # attribute. That is done by the constructor.
+ def set_defaults(self):
+ for field in self.get_write_fields():
+ self.set_default(field)
+
+ # Write a wfdb header file. The signals or segments fields are not used.
+ def wrheader(self, write_dir=''):
+
+ # Get all the fields used to write the header
+ write_fields = self.get_write_fields()
+
+ # Check the validity of individual fields used to write the header
+ for f in write_fields:
+ self.check_field(f)
+
+ # Check the cohesion of fields used to write the header
+ self.check_field_cohesion()
+
+ # Write the header file using the specified fields
+ self.wr_header_file(write_fields, write_dir)
+
+
+ # Get the list of fields used to write the multi-segment header.
+ # Returns the default required fields, the user defined fields, and their dependencies.
+ def get_write_fields(self):
+
+ # Record specification fields
+ write_fields=self.get_write_subset('record')
+
+ # Segment specification fields are all mandatory
+ write_fields = write_fields + ['seg_name', 'seg_len']
+
+ # Comments
+        if self.comments is not None:
+ write_fields.append('comments')
+ return write_fields
+
+ # Set a field to its default value if there is a default.
+ def set_default(self, field):
+
+ # Record specification fields
+ if field in rec_field_specs:
+ # Return if no default to set, or if the field is already present.
+ if rec_field_specs[field].write_def is None or getattr(self, field) is not None:
+ return
+ setattr(self, field, rec_field_specs[field].write_def)
+
+
+
+ # Check the cohesion of fields used to write the header
+ def check_field_cohesion(self):
+
+ # The length of seg_name and seg_len must match n_seg
+ for f in ['seg_name', 'seg_len']:
+ if len(getattr(self, f)) != self.n_seg:
+ raise ValueError('The length of field: '+f+' does not match field n_seg.')
+
+ # Check the sum of the 'seg_len' fields against 'sig_len'
+ if np.sum(self.seg_len) != self.sig_len:
+ raise ValueError("The sum of the 'seg_len' fields do not match the 'sig_len' field")
+
+
+ # Write a header file using the specified fields
+ def wr_header_file(self, write_fields, write_dir):
+
+ header_lines=[]
+
+ # Create record specification line
+ recordline = ''
+ # Traverse the ordered dictionary
+ for field in rec_field_specs:
+ # If the field is being used, add it with its delimiter
+ if field in write_fields:
+ recordline = recordline + rec_field_specs[field].delimiter + str(getattr(self, field))
+ header_lines.append(recordline)
+
+ # Create segment specification lines
+ segmentlines = self.n_seg*['']
+ # For both fields, add each of its elements with the delimiter to the appropriate line
+        for field in ['seg_name', 'seg_len']:
+ for segnum in range(0, self.n_seg):
+ segmentlines[segnum] = segmentlines[segnum] + seg_field_specs[field].delimiter + str(getattr(self, field)[segnum])
+
+ header_lines = header_lines + segmentlines
+
+ # Create comment lines (if any)
+ if 'comments' in write_fields:
+ comment_lines = ['# '+comment for comment in self.comments]
+ header_lines = header_lines + comment_lines
+
+        lines_to_file(self.record_name+'.hea', write_dir, header_lines)
+
+
+ def get_sig_segments(self, sig_name=None):
+ """
+ Get a list of the segment numbers that contain a particular signal
+ (or a dictionary of segment numbers for a list of signals)
+ Only works if information about the segments has been read in
+ """
+ if self.segments is None:
+ raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True")
+
+ # Default value = all signal names.
+ if sig_name is None:
+ sig_name = self.get_sig_name()
+
+ if isinstance(sig_name, list):
+ sigdict = {}
+ for sig in sig_name:
+ sigdict[sig] = self.get_sig_segments(sig)
+ return sigdict
+ elif isinstance(sig_name, str):
+ sigsegs = []
+ for i in range(self.n_seg):
+ if self.seg_name[i] != '~' and sig_name in self.segments[i].sig_name:
+ sigsegs.append(i)
+ return sigsegs
+ else:
+ raise TypeError('sig_name must be a string or a list of strings')
+
+ # Get the signal names for the entire record
+ def get_sig_name(self):
+ if self.segments is None:
+ raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rd_segments=True")
+
+ if self.layout == 'Fixed':
+ for i in range(self.n_seg):
+ if self.seg_name[i] != '~':
+ sig_name = self.segments[i].sig_name
+ break
+ else:
+ sig_name = self.segments[0].sig_name
+
+ return sig_name
+
+
+# Read header file to get comment and non-comment lines
+def get_header_lines(record_name, pb_dir):
+ # Read local file
+ if pb_dir is None:
+ with open(record_name + ".hea", 'r') as fp:
+ # Record line followed by signal/segment lines if any
+ header_lines = []
+ # Comment lines
+ comment_lines = []
+ for line in fp:
+ line = line.strip()
+ # Comment line
+ if line.startswith('#'):
+ comment_lines.append(line)
+ # Non-empty non-comment line = header line.
+ elif line:
+ # Look for a comment in the line
+ ci = line.find('#')
+ if ci > 0:
+ header_lines.append(line[:ci])
+ # comment on same line as header line
+ comment_lines.append(line[ci:])
+ else:
+ header_lines.append(line)
+ # Read online header file
+ else:
+ header_lines, comment_lines = download.stream_header(record_name, pb_dir)
+
+ return header_lines, comment_lines
+
+
+# Extract fields from a record line string into a dictionary
+def read_rec_line(rec_line):
+
+ # Dictionary for record fields
+ d_rec = {}
+
+ # Read string fields from record line
+ (d_rec['record_name'], d_rec['n_seg'], d_rec['n_sig'], d_rec['fs'],
+ d_rec['counter_freq'], d_rec['base_counter'], d_rec['sig_len'],
+ d_rec['base_time'], d_rec['base_date']) = re.findall(rx_record, rec_line)[0]
+
+ for field in rec_field_specs:
+ # Replace empty strings with their read defaults (which are mostly None)
+ if d_rec[field] == '':
+ d_rec[field] = rec_field_specs[field].read_def
+ # Typecast non-empty strings for numerical fields
+ else:
+ if rec_field_specs[field].allowed_types is int_types:
+ d_rec[field] = int(d_rec[field])
+ # fs may be read as float or int
+ elif field == 'fs':
+ fs = float(d_rec['fs'])
+ if round(fs, 8) == float(int(fs)):
+ fs = int(fs)
+ d_rec['fs'] = fs
+
+ return d_rec
+
+# Extract fields from signal line strings into a dictionary
+def read_sig_lines(sig_lines):
+ # Dictionary for signal fields
+ d_sig = {}
+
+ # Each dictionary field is a list
+ for field in sig_field_specs:
+ d_sig[field] = [None]*len(sig_lines)
+
+ # Read string fields from signal line
+ for i in range(0, len(sig_lines)):
+ (d_sig['file_name'][i], d_sig['fmt'][i],
+ d_sig['samps_per_frame'][i],
+ d_sig['skew'][i],
+ d_sig['byte_offset'][i],
+ d_sig['adc_gain'][i],
+ d_sig['baseline'][i],
+ d_sig['units'][i],
+ d_sig['adc_res'][i],
+ d_sig['adc_zero'][i],
+ d_sig['init_value'][i],
+ d_sig['checksum'][i],
+ d_sig['block_size'][i],
+ d_sig['sig_name'][i]) = rx_signal.findall(sig_lines[i])[0]
+
+ for field in sig_field_specs:
+ # Replace empty strings with their read defaults (which are mostly None)
+ # Note: Never set a field to None. [None]* n_sig is accurate, indicating
+ # that different channels can be present or missing.
+ if d_sig[field][i] == '':
+ d_sig[field][i] = sig_field_specs[field].read_def
+
+ # Special case: missing baseline defaults to ADCzero if present
+ if field == 'baseline' and d_sig['adc_zero'][i] != '':
+ d_sig['baseline'][i] = int(d_sig['adc_zero'][i])
+ # Typecast non-empty strings for numerical fields
+ else:
+ if sig_field_specs[field].allowed_types is int_types:
+ d_sig[field][i] = int(d_sig[field][i])
+ elif sig_field_specs[field].allowed_types is float_types:
+ d_sig[field][i] = float(d_sig[field][i])
+ # Special case: gain of 0 means 200
+ if field == 'adc_gain' and d_sig['adc_gain'][i] == 0:
+ d_sig['adc_gain'][i] = 200.
+
+ return d_sig
+
+
+# Extract fields from segment line strings into a dictionary
+def read_seg_lines(seg_lines):
+
+ # Dictionary for signal fields
+ d_seg = {}
+
+ # Each dictionary field is a list
+ for field in seg_field_specs:
+ d_seg[field] = [None]*len(seg_lines)
+
+ # Read string fields from signal line
+ for i in range(0, len(seg_lines)):
+ (d_seg['seg_name'][i], d_seg['seg_len'][i]) = rx_segment.findall(seg_lines[i])[0]
+
+ for field in seg_field_specs:
+ # Replace empty strings with their read defaults (which are mostly None)
+ if d_seg[field][i] == '':
+ d_seg[field][i] = seg_field_specs[field].read_def
+ # Typecast non-empty strings for numerical field
+ else:
+ if field == 'seg_len':
+ d_seg[field][i] = int(d_seg[field][i])
+
+ return d_seg
+
+
+def lines_to_file(file_name, write_dir, lines):
+ # Write each line in a list of strings to a text file
+ f = open(os.path.join(write_dir, file_name), 'w')
+ for l in lines:
+ f.write("%s\n" % l)
+ f.close()
diff --git a/wfdb/readwrite/_signals.py b/wfdb/io/_signal.py
similarity index 55%
rename from wfdb/readwrite/_signals.py
rename to wfdb/io/_signal.py
index 3d5394f7..f1adf31c 100644
--- a/wfdb/readwrite/_signals.py
+++ b/wfdb/io/_signal.py
@@ -1,382 +1,400 @@
+import math
import numpy as np
import os
-import math
-from . import downloads
-# All defined WFDB dat formats
-datformats = ["80","212","16","24","32"]
+from . import download
-specialfmts = ['212','310','311']
+# WFDB dat formats - https://www.physionet.org/physiotools/wag/signal-5.htm
+simple_fmts = ['80', '16', '24', '32']
+special_fmts = ['212', '310', '311']
+dat_fmts = simple_fmts + special_fmts
-# Class with signal methods
-# To be inherited by Record from records.py.
-class SignalsMixin(object):
+class SignalMixin(object):
+ """
+ Mixin class with signal methods. Inherited by Record class.
+ """
- def wrdats(self, expanded):
+ def wr_dats(self, expanded, write_dir):
# Write all dat files associated with a record
- # expanded=True to use e_d_signals instead of d_signals
+ # expanded=True to use e_d_signal instead of d_signal
- if not self.nsig:
+ if not self.n_sig:
return
-
+
# Get all the fields used to write the header
# Assuming this method was called through wrsamp,
# these will have already been checked in wrheader()
- writefields = self.getwritefields()
+ write_fields = self.get_write_fields()
if expanded:
- # Using list of arrays e_d_signals
- self.checkfield('e_d_signals', channels = 'all')
+ # Using list of arrays e_d_signal
+ self.check_field('e_d_signal', channels = 'all')
else:
- # Check the validity of the d_signals field
- self.checkfield('d_signals')
+ # Check the validity of the d_signal field
+ self.check_field('d_signal')
+
+ # Check the cohesion of the d_signal field against the other
+ # fields used to write the header
+ self.check_sig_cohesion(write_fields, expanded)
- # Check the cohesion of the d_signals field against the other fields used to write the header
- self.checksignalcohesion(writefields, expanded)
-
# Write each of the specified dat files
- self.wrdatfiles(expanded)
+ self.wr_dat_files(expanded=expanded, write_dir=write_dir)
- # Check the cohesion of the d_signals/e_d_signals field with the other fields used to write the record
- def checksignalcohesion(self, writefields, expanded):
- # Using list of arrays e_d_signals
+ def check_sig_cohesion(self, write_fields, expanded):
+ """
+ Check the cohesion of the d_signal/e_d_signal field with the other
+ fields used to write the record
+ """
+ # Using list of arrays e_d_signal
if expanded:
- # Set default sampsperframe
- spf = self.sampsperframe
+ # Set default samps_per_frame
+ spf = self.samps_per_frame
for ch in range(len(spf)):
if spf[ch] is None:
spf[ch] = 1
# Match the actual signal shape against stated length and number of channels
- if self.nsig != len(self.e_d_signals):
- raise ValueError('nsig does not match the length of e_d_signals')
- for ch in range(self.nsig):
- if len(self.e_d_signals[ch]) != spf[ch]*self.siglen:
- raise ValueError('Length of channel '+str(ch)+'does not match sampsperframe['+str(ch+']*siglen'))
+ if self.n_sig != len(self.e_d_signal):
+ raise ValueError('n_sig does not match the length of e_d_signal')
+ for ch in range(self.n_sig):
+ if len(self.e_d_signal[ch]) != spf[ch]*self.sig_len:
+                    raise ValueError('Length of channel '+str(ch)+' does not match samps_per_frame['+str(ch)+']*sig_len')
# For each channel (if any), make sure the digital format has no values out of bounds
- for ch in range(0, self.nsig):
+ for ch in range(0, self.n_sig):
fmt = self.fmt[ch]
dmin, dmax = digi_bounds(self.fmt[ch])
-
- chmin = min(self.e_d_signals[ch])
- chmax = max(self.e_d_signals[ch])
+
+ chmin = min(self.e_d_signal[ch])
+ chmax = max(self.e_d_signal[ch])
if (chmin < dmin) or (chmax > dmax):
raise IndexError("Channel "+str(ch)+" contain values outside allowed range ["+str(dmin)+", "+str(dmax)+"] for fmt "+str(fmt))
-
+
# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
- if self.nsig>0:
- if 'checksum' in writefields:
+ if self.n_sig>0:
+ if 'checksum' in write_fields:
realchecksum = self.calc_checksum(expanded)
if self.checksum != realchecksum:
- print("The actual checksum of e_d_signals is: ", realchecksum)
- raise ValueError("checksum field does not match actual checksum of e_d_signals")
- if 'initvalue' in writefields:
- realinitvalue = [self.e_d_signals[ch][0] for ch in range(self.nsig)]
- if self.initvalue != realinitvalue:
- print("The actual initvalue of e_d_signals is: ", realinitvalue)
- raise ValueError("initvalue field does not match actual initvalue of e_d_signals")
-
- # Using uniform d_signals
+ print("The actual checksum of e_d_signal is: ", realchecksum)
+ raise ValueError("checksum field does not match actual checksum of e_d_signal")
+ if 'init_value' in write_fields:
+ realinit_value = [self.e_d_signal[ch][0] for ch in range(self.n_sig)]
+ if self.init_value != realinit_value:
+ print("The actual init_value of e_d_signal is: ", realinit_value)
+ raise ValueError("init_value field does not match actual init_value of e_d_signal")
+
+ # Using uniform d_signal
else:
# Match the actual signal shape against stated length and number of channels
- if (self.siglen, self.nsig) != self.d_signals.shape:
- print('siglen: ', self.siglen)
- print('nsig: ', self.nsig)
- print('d_signals.shape: ', self.d_signals.shape)
- raise ValueError('siglen and nsig do not match shape of d_signals')
+ if (self.sig_len, self.n_sig) != self.d_signal.shape:
+ print('sig_len: ', self.sig_len)
+ print('n_sig: ', self.n_sig)
+ print('d_signal.shape: ', self.d_signal.shape)
+ raise ValueError('sig_len and n_sig do not match shape of d_signal')
# For each channel (if any), make sure the digital format has no values out of bounds
- for ch in range(0, self.nsig):
+ for ch in range(0, self.n_sig):
fmt = self.fmt[ch]
dmin, dmax = digi_bounds(self.fmt[ch])
-
- chmin = min(self.d_signals[:,ch])
- chmax = max(self.d_signals[:,ch])
+
+ chmin = min(self.d_signal[:,ch])
+ chmax = max(self.d_signal[:,ch])
if (chmin < dmin) or (chmax > dmax):
raise IndexError("Channel "+str(ch)+" contain values outside allowed range ["+str(dmin)+", "+str(dmax)+"] for fmt "+str(fmt))
-
+
# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
- if self.nsig>0:
- if 'checksum' in writefields:
+ if self.n_sig>0:
+ if 'checksum' in write_fields:
realchecksum = self.calc_checksum()
if self.checksum != realchecksum:
- print("The actual checksum of d_signals is: ", realchecksum)
- raise ValueError("checksum field does not match actual checksum of d_signals")
- if 'initvalue' in writefields:
- realinitvalue = list(self.d_signals[0,:])
- if self.initvalue != realinitvalue:
- print("The actual initvalue of d_signals is: ", realinitvalue)
- raise ValueError("initvalue field does not match actual initvalue of d_signals")
-
-
- def set_p_features(self, do_dac = False, expanded=False):
+ print("The actual checksum of d_signal is: ", realchecksum)
+ raise ValueError("checksum field does not match actual checksum of d_signal")
+ if 'init_value' in write_fields:
+ realinit_value = list(self.d_signal[0,:])
+ if self.init_value != realinit_value:
+ print("The actual init_value of d_signal is: ", realinit_value)
+ raise ValueError("init_value field does not match actual init_value of d_signal")
+
+
+ def set_p_features(self, do_dac=False, expanded=False):
"""
- Use properties of the p_signals (expanded=False) or e_p_signals field to set other fields:
- - nsig
- - siglen
- If expanded=True, sampsperframe is also required.
+ Use properties of the p_signal (expanded=False) or e_p_signal field to set other fields:
+ - n_sig
+ - sig_len
+ If expanded=True, samps_per_frame is also required.
- If do_dac == True, the (e_)_d_signals field will be used to perform digital to analogue conversion
- to set the (e_)p_signals field, before (e_)p_signals is used.
+ If do_dac == True, the (e_)_d_signal field will be used to perform digital to analogue conversion
+ to set the (e_)p_signal field, before (e_)p_signal is used.
Regarding dac conversion:
- fmt, gain, and baseline must all be set in order to perform dac.
- Unlike with adc, there is no way to infer these fields.
- - Using the fmt, gain and baseline fields, dac is performed, and (e_)p_signals is set.
+ - Using the fmt, gain and baseline fields, dac is performed, and (e_)p_signal is set.
*Developer note: Seems this function will be very infrequently used.
The set_d_features function seems far more useful.
"""
if expanded:
- if do_dac == 1:
- self.checkfield('e_d_signals', channels = 'all')
- self.checkfield('fmt', 'all')
- self.checkfield('adcgain', 'all')
- self.checkfield('baseline', 'all')
- self.checkfield('sampsperframe', 'all')
+ if do_dac:
+ self.check_field('e_d_signal', channels = 'all')
+ self.check_field('fmt', 'all')
+ self.check_field('adc_gain', 'all')
+ self.check_field('baseline', 'all')
+ self.check_field('samps_per_frame', 'all')
# All required fields are present and valid. Perform DAC
- self.e_p_signals = self.dac(expanded)
+ self.e_p_signal = self.dac(expanded)
- # Use e_p_signals to set fields
- self.checkfield('e_p_signals', channels = 'all')
- self.siglen = int(len(self.e_p_signals[0])/self.sampsperframe[0])
- self.nsig = len(self.e_p_signals)
+ # Use e_p_signal to set fields
+ self.check_field('e_p_signal', channels = 'all')
+ self.sig_len = int(len(self.e_p_signal[0])/self.samps_per_frame[0])
+ self.n_sig = len(self.e_p_signal)
else:
- if do_dac == 1:
- self.checkfield('d_signals')
- self.checkfield('fmt', 'all')
- self.checkfield('adcgain', 'all')
- self.checkfield('baseline', 'all')
+ if do_dac:
+ self.check_field('d_signal')
+ self.check_field('fmt', 'all')
+ self.check_field('adc_gain', 'all')
+ self.check_field('baseline', 'all')
# All required fields are present and valid. Perform DAC
- self.p_signals = self.dac()
+ self.p_signal = self.dac()
- # Use p_signals to set fields
- self.checkfield('p_signals')
- self.siglen = self.p_signals.shape[0]
- self.nsig = self.p_signals.shape[1]
+ # Use p_signal to set fields
+ self.check_field('p_signal')
+ self.sig_len = self.p_signal.shape[0]
+ self.n_sig = self.p_signal.shape[1]
- def set_d_features(self, do_adc = False, singlefmt = 1, expanded=False):
+ def set_d_features(self, do_adc=False, single_fmt=True, expanded=False):
"""
- Use properties of the (e_)d_signals field to set other fields: nsig, siglen, initvalue, checksum, *(fmt, adcgain, baseline)
- If do_adc == True, the (e_)p_signals field will first be used to perform analogue to digital conversion to set the (e_)d_signals
- field, before (e_)d_signals is used.
+ Use properties of the (e_)d_signal field to set other fields:
+ - n_sig
+ - sig_len
+ - init_value
+ - checksum,
+ - *(fmt, adc_gain, baseline)
+
+ If `do_adc`, the `(e_)p_signal` field will first be used to perform
+ analogue to digital conversion to set the `(e_)d_signal`
+ field, before `(e_)d_signal` is used.
Regarding adc conversion:
- - If fmt is unset:
- - Neither adcgain nor baseline may be set. If the digital values used to store the signal are known, then the file
- format should also be known.
- - The most appropriate fmt for the signals will be calculated and the 'fmt' attribute will be set. Given that neither
- gain nor baseline are allowed to be set, optimal values for those fields will then be calculated and set as well.
-
- - If fmt is set:
- - If both adcgain and baseline are unset, optimal values for those fields will be calculated the fields will be set.
- - If both adcgain and baseline are set, the function will continue.
- - If only one of adcgain and baseline are set, this function will throw an error. It makes no sense to know only
- one of those fields.
-
- ADC will occur after valid values for fmt, adcgain, and baseline are present, using all three fields.
+ - If fmt is unset:
+ - Neither adc_gain nor baseline may be set. If the digital values
+ used to store the signal are known, then the file format should
+ also be known.
+ - The most appropriate fmt for the signals will be calculated and the
+ `fmt` attribute will be set. Given that neither `adc_gain` nor
+ `baseline` is allowed to be set, optimal values for those fields will
+ then be calculated and set as well.
+ - If fmt is set:
+ - If both adc_gain and baseline are unset, optimal values for those
+ fields will be calculated the fields will be set.
+ - If both adc_gain and baseline are set, the function will continue.
+ - If only one of adc_gain and baseline are set, this function will
+ raise an error. It makes no sense to know only one of those fields.
+ - ADC will occur after valid values for fmt, adc_gain, and baseline are
+ present, using all three fields.
+
"""
if expanded:
# adc is performed.
- if do_adc == True:
- self.checkfield('e_p_signals', channels = 'all')
+ if do_adc:
+ self.check_field('e_p_signal', channels='all')
- # If there is no fmt set
+ # If there is no fmt set it, adc_gain, and baseline
if self.fmt is None:
- # Make sure that neither adcgain nor baseline are set
- if self.adcgain is not None or self.baseline is not None:
+ # Make sure that neither adc_gain nor baseline are set
+ if self.adc_gain is not None or self.baseline is not None:
raise Exception('If fmt is not set, gain and baseline may not be set either.')
- # Choose appropriate fmts based on estimated signal resolutions.
- res = estres(self.e_p_signals)
- self.fmt = wfdbfmt(res, singlefmt)
+ # Choose appropriate fmts based on estimated signal resolutions.
+ res = est_res(self.e_p_signal)
+ self.fmt = wfdbfmt(res, single_fmt)
# If there is a fmt set
else:
- self.checkfield('fmt', 'all')
+ self.check_field('fmt', 'all')
# Neither field set
- if self.adcgain is None and self.baseline is None:
+ if self.adc_gain is None and self.baseline is None:
# Calculate and set optimal gain and baseline values to convert physical signals
- self.adcgain, self.baseline = self.calculate_adcparams()
+ self.adc_gain, self.baseline = self.calc_adc_params()
# Exactly one field set
- elif (self.adcgain is None) ^ (self.baseline is None):
+ elif (self.adc_gain is None) ^ (self.baseline is None):
raise Exception('If fmt is set, gain and baseline should both be set or not set.')
-
- self.checkfield('adcgain', 'all')
- self.checkfield('baseline', 'all')
+
+ self.check_field('adc_gain', 'all')
+ self.check_field('baseline', 'all')
# All required fields are present and valid. Perform ADC
- self.d_signals = self.adc(expanded)
+ self.d_signal = self.adc(expanded)
- # Use e_d_signals to set fields
- self.checkfield('e_d_signals', channels = 'all')
- self.siglen = int(len(self.e_d_signals[0])/self.sampsperframe[0])
- self.nsig = len(self.e_d_signals)
- self.initvalue = [sig[0] for sig in self.e_d_signals]
+ # Use e_d_signal to set fields
+ self.check_field('e_d_signal', channels='all')
+ self.sig_len = int(len(self.e_d_signal[0])/self.samps_per_frame[0])
+ self.n_sig = len(self.e_d_signal)
+ self.init_value = [sig[0] for sig in self.e_d_signal]
self.checksum = self.calc_checksum(expanded)
else:
# adc is performed.
- if do_adc == True:
- self.checkfield('p_signals')
+ if do_adc:
+ self.check_field('p_signal')
# If there is no fmt set
if self.fmt is None:
- # Make sure that neither adcgain nor baseline are set
- if self.adcgain is not None or self.baseline is not None:
+ # Make sure that neither adc_gain nor baseline are set
+ if self.adc_gain is not None or self.baseline is not None:
raise Exception('If fmt is not set, gain and baseline may not be set either.')
- # Choose appropriate fmts based on estimated signal resolutions.
- res = estres(self.p_signals)
- self.fmt = wfdbfmt(res, singlefmt)
+ # Choose appropriate fmts based on estimated signal resolutions.
+ res = est_res(self.p_signal)
+ self.fmt = wfdbfmt(res, single_fmt)
+ # Calculate and set optimal gain and baseline values to convert physical signals
+ self.adc_gain, self.baseline = self.calc_adc_params()
+
# If there is a fmt set
else:
-
- self.checkfield('fmt', 'all')
+ self.check_field('fmt', 'all')
# Neither field set
- if self.adcgain is None and self.baseline is None:
+ if self.adc_gain is None and self.baseline is None:
# Calculate and set optimal gain and baseline values to convert physical signals
- self.adcgain, self.baseline = self.calculate_adcparams()
+ self.adc_gain, self.baseline = self.calc_adc_params()
# Exactly one field set
- elif (self.adcgain is None) ^ (self.baseline is None):
+ elif (self.adc_gain is None) ^ (self.baseline is None):
raise Exception('If fmt is set, gain and baseline should both be set or not set.')
-
- self.checkfield('adcgain', 'all')
- self.checkfield('baseline', 'all')
+
+ self.check_field('adc_gain', 'all')
+ self.check_field('baseline', 'all')
# All required fields are present and valid. Perform ADC
- self.d_signals = self.adc()
+ self.d_signal = self.adc()
- # Use d_signals to set fields
- self.checkfield('d_signals')
- self.siglen = self.d_signals.shape[0]
- self.nsig = self.d_signals.shape[1]
- self.initvalue = list(self.d_signals[0,:])
+ # Use d_signal to set fields
+ self.check_field('d_signal')
+ self.sig_len = self.d_signal.shape[0]
+ self.n_sig = self.d_signal.shape[1]
+ self.init_value = list(self.d_signal[0,:])
self.checksum = self.calc_checksum()
-
def adc(self, expanded=False, inplace=False):
"""
- Performs analogue to digital conversion of the physical signal stored
- in p_signals if expanded is False, or e_p_signals if expanded is True.
+ Performs analogue to digital conversion of the physical signal stored
+ in p_signal if expanded is False, or e_p_signal if expanded is True.
- The p_signals/e_p_signals, fmt, gain, and baseline fields must all be
+ The p_signal/e_p_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the adc will be performed inplace on the variable,
- the d_signals/e_d_signals attribute will be set, and the
- p_signals/e_p_signals field will be set to None.
+ the d_signal/e_d_signal attribute will be set, and the
+ p_signal/e_p_signal field will be set to None.
Input arguments:
- expanded (default=False): Boolean specifying whether to transform the
- e_p_signals attribute (True) or the p_signals attribute (False).
+ e_p_signal attribute (True) or the p_signal attribute (False).
- inplace (default=False): Boolean specifying whether to automatically
set the object's corresponding digital signal attribute and set the
physical signal attribute to None (True), or to return the converted
- signal as a separate variable without changing the original physical
+ signal as a separate variable without changing the original physical
signal attribute (False).
-
+
Possible output argument:
- - d_signals: The digital conversion of the signal. Either a 2d numpy
+ - d_signal: The digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Example Usage:
import wfdb
- record = wfdb.rdsamp('sampledata/100')
+ record = wfdb.rdsamp('sample-data/100')
d_signal = record.adc()
record.adc(inplace=True)
record.dac(inplace=True)
"""
-
+
# The digital nan values for each channel
dnans = digi_nan(self.fmt)
-
+
# To do: choose the minimum return res needed
intdtype = 'int64'
# Do inplace conversion and set relevant variables.
if inplace:
if expanded:
- for ch in range(0, self.nsig):
+ for ch in range(0, self.n_sig):
# nan locations for the channel
- ch_nanlocs = np.isnan(self.e_p_signals[ch])
- np.multiply(self.e_p_signals[ch], self.adcgain[ch], self.e_p_signals[ch])
- np.add(e_p_signals[ch], self.baseline[ch], self.e_p_signals[ch])
- self.e_p_signals[ch] = self.e_p_signals[ch].astype(intdtype, copy=False)
- self.e_p_signals[ch][ch_nanlocs] = dnans[ch]
- self.e_d_signals = self.e_p_signals
- self.e_p_signals = None
+ ch_nanlocs = np.isnan(self.e_p_signal[ch])
+ np.multiply(self.e_p_signal[ch], self.adc_gain[ch], self.e_p_signal[ch])
+ np.add(self.e_p_signal[ch], self.baseline[ch], self.e_p_signal[ch])
+ self.e_p_signal[ch] = self.e_p_signal[ch].astype(intdtype, copy=False)
+ self.e_p_signal[ch][ch_nanlocs] = dnans[ch]
+ self.e_d_signal = self.e_p_signal
+ self.e_p_signal = None
else:
- nanlocs = np.isnan(self.p_signals)
- np.multiply(self.p_signals, self.adcgain, self.p_signals)
- np.add(self.p_signals, self.baseline, self.p_signals)
- self.p_signals = self.p_signals.astype(intdtype, copy=False)
- self.d_signals = self.p_signals
- self.p_signals = None
+ nanlocs = np.isnan(self.p_signal)
+ np.multiply(self.p_signal, self.adc_gain, self.p_signal)
+ np.add(self.p_signal, self.baseline, self.p_signal)
+ self.p_signal = self.p_signal.astype(intdtype, copy=False)
+ self.d_signal = self.p_signal
+ self.p_signal = None
# Return the variable
else:
if expanded:
- d_signals = []
- for ch in range(0, self.nsig):
+ d_signal = []
+ for ch in range(0, self.n_sig):
# nan locations for the channel
- ch_nanlocs = np.isnan(self.e_p_signals[ch])
- ch_d_signal = self.e_p_signals.copy()
- np.multiply(ch_d_signal, self.adcgain[ch], ch_d_signal)
+ ch_nanlocs = np.isnan(self.e_p_signal[ch])
+ ch_d_signal = self.e_p_signal[ch].copy()
+ np.multiply(ch_d_signal, self.adc_gain[ch], ch_d_signal)
np.add(ch_d_signal, self.baseline[ch], ch_d_signal)
ch_d_signal = ch_d_signal.astype(intdtype, copy=False)
ch_d_signal[ch_nanlocs] = dnans[ch]
- d_signals.append(ch_d_signal)
+ d_signal.append(ch_d_signal)
else:
- nanlocs = np.isnan(self.p_signals)
+ nanlocs = np.isnan(self.p_signal)
# Cannot cast dtype to int now because gain is float.
- d_signals = self.p_signals.copy()
- np.multiply(d_signals, self.adcgain, d_signals)
- np.add(d_signals, self.baseline, d_signals)
- d_signals = d_signals.astype(intdtype, copy=False)
+ d_signal = self.p_signal.copy()
+ np.multiply(d_signal, self.adc_gain, d_signal)
+ np.add(d_signal, self.baseline, d_signal)
+ d_signal = d_signal.astype(intdtype, copy=False)
if nanlocs.any():
- for ch in range(d_signals.shape[1]):
+ for ch in range(d_signal.shape[1]):
if nanlocs[:,ch].any():
- d_signals[nanlocs[:,ch],ch] = dnans[ch]
-
- return d_signals
+ d_signal[nanlocs[:,ch],ch] = dnans[ch]
-
- def dac(self, expanded=False, returnres=64, inplace=False):
+ return d_signal
+
+
+ def dac(self, expanded=False, return_res=64, inplace=False):
"""
Performs the digital to analogue conversion of the signal stored
- in d_signals if expanded is False, or e_d_signals if expanded is True.
+ in d_signal if expanded is False, or e_d_signal if expanded is True.
- The d_signals/e_d_signals, fmt, gain, and baseline fields must all be
+ The d_signal/e_d_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the dac will be performed inplace on the variable,
- the p_signals/e_p_signals attribute will be set, and the
- d_signals/e_d_signals field will be set to None.
-
+ the p_signal/e_p_signal attribute will be set, and the
+ d_signal/e_d_signal field will be set to None.
+
Input arguments:
- - expanded (default=False): Boolean specifying whether to transform the
- e_d_signals attribute (True) or the d_signals attribute (False).
- - inplace (default=False): Boolean specifying whether to automatically
+ - expanded: Boolean specifying whether to transform the
+ e_d_signal attribute (True) or the d_signal attribute (False).
+ - inplace: Boolean specifying whether to automatically
set the object's corresponding physical signal attribute and set the
digital signal attribute to None (True), or to return the converted
- signal as a separate variable without changing the original digital
+ signal as a separate variable without changing the original digital
signal attribute (False).
-
+
Possible output argument:
- - p_signals: The physical conversion of the signal. Either a 2d numpy
+ - p_signal: The physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Example Usage:
import wfdb
- record = wfdb.rdsamp('sampledata/100', physical=False)
+ record = wfdb.rdsamp('sample-data/100', physical=False)
p_signal = record.dac()
record.dac(inplace=True)
record.adc(inplace=True)
@@ -386,9 +404,9 @@ def dac(self, expanded=False, returnres=64, inplace=False):
dnans = digi_nan(self.fmt)
# Get the appropriate float dtype
- if returnres == 64:
+ if return_res == 64:
floatdtype = 'float64'
- elif returnres == 32:
+ elif return_res == 32:
floatdtype = 'float32'
else:
floatdtype = 'float16'
@@ -396,187 +414,197 @@ def dac(self, expanded=False, returnres=64, inplace=False):
# Do inplace conversion and set relevant variables.
if inplace:
if expanded:
- for ch in range(0, self.nsig):
+ for ch in range(0, self.n_sig):
# nan locations for the channel
- ch_nanlocs = self.e_d_signals[ch] == dnans[ch]
- self.e_d_signals[ch] = self.e_d_signals[ch].astype(floatdtype, copy=False)
- np.subtract(self.e_d_signals[ch], self.baseline[ch], self.e_d_signals[ch])
- np.divide(self.e_d_signals[ch], self.adcgain[ch], self.e_d_signals[ch])
- self.e_d_signals[ch][ch_nanlocs] = np.nan
- self.e_p_signals = self.e_d_signals
- self.e_d_signals = None
+ ch_nanlocs = self.e_d_signal[ch] == dnans[ch]
+ self.e_d_signal[ch] = self.e_d_signal[ch].astype(floatdtype, copy=False)
+ np.subtract(self.e_d_signal[ch], self.baseline[ch], self.e_d_signal[ch])
+ np.divide(self.e_d_signal[ch], self.adc_gain[ch], self.e_d_signal[ch])
+ self.e_d_signal[ch][ch_nanlocs] = np.nan
+ self.e_p_signal = self.e_d_signal
+ self.e_d_signal = None
else:
- nanlocs = self.d_signals == dnans
+ nanlocs = self.d_signal == dnans
# Do float conversion immediately to avoid potential under/overflow
# of efficient int dtype
- self.d_signals = self.d_signals.astype(floatdtype, copy=False)
- np.subtract(self.d_signals, self.baseline, self.d_signals)
- np.divide(self.d_signals, self.adcgain, self.d_signals)
- self.d_signals[nanlocs] = np.nan
- self.p_signals = self.d_signals
- self.d_signals = None
+ self.d_signal = self.d_signal.astype(floatdtype, copy=False)
+ np.subtract(self.d_signal, self.baseline, self.d_signal)
+ np.divide(self.d_signal, self.adc_gain, self.d_signal)
+ self.d_signal[nanlocs] = np.nan
+ self.p_signal = self.d_signal
+ self.d_signal = None
# Return the variable
else:
if expanded:
- p_signals = []
- for ch in range(0, self.nsig):
+ p_signal = []
+ for ch in range(0, self.n_sig):
# nan locations for the channel
- ch_nanlocs = self.e_d_signals[ch] == dnans[ch]
- ch_p_signal = self.e_d_signals[ch].astype(floatdtype, copy=False)
+ ch_nanlocs = self.e_d_signal[ch] == dnans[ch]
+ ch_p_signal = self.e_d_signal[ch].astype(floatdtype, copy=False)
np.subtract(ch_p_signal, self.baseline[ch], ch_p_signal)
- np.divide(ch_p_signal, self.adcgain[ch], ch_p_signal)
+ np.divide(ch_p_signal, self.adc_gain[ch], ch_p_signal)
ch_p_signal[ch_nanlocs] = np.nan
- p_signals.append(ch_p_signal)
+ p_signal.append(ch_p_signal)
else:
- nanlocs = self.d_signals == dnans
- p_signals = self.d_signals.astype(floatdtype, copy=False)
- np.subtract(p_signals, self.baseline, p_signals)
- np.divide(p_signals, self.adcgain, p_signals)
- p_signals[nanlocs] = np.nan
-
- return p_signals
-
-
- # Compute appropriate gain and baseline parameters given the physical signal and the fmts
- # self.fmt must be a list with length equal to the number of signal channels in self.p_signals
- def calculate_adcparams(self):
-
- # digital - baseline / gain = physical
- # physical * gain + baseline = digital
+ nanlocs = self.d_signal == dnans
+ p_signal = self.d_signal.astype(floatdtype, copy=False)
+ np.subtract(p_signal, self.baseline, p_signal)
+ np.divide(p_signal, self.adc_gain, p_signal)
+ p_signal[nanlocs] = np.nan
+ return p_signal
+
+
+ def calc_adc_params(self):
+ """
+ Compute appropriate gain and baseline parameters given the physical
+ signal and the fmts.
+
+ digital - baseline / gain = physical
+ physical * gain + baseline = digital
+ """
gains = []
baselines = []
-
- # min and max ignoring nans, unless whole channel is nan. Should suppress warning message.
- minvals = np.nanmin(self.p_signals, axis=0)
- maxvals = np.nanmax(self.p_signals, axis=0)
-
+
+ # min and max ignoring nans, unless whole channel is nan.
+ # Should suppress warning message.
+ minvals = np.nanmin(self.p_signal, axis=0)
+ maxvals = np.nanmax(self.p_signal, axis=0)
+
dnans = digi_nan(self.fmt)
-
- for ch in range(0, np.shape(self.p_signals)[1]):
- dmin, dmax = digi_bounds(self.fmt[ch]) # Get the minimum and maximum (valid) storage values
- dmin = dmin + 1 # add 1 because the lowest value is used to store nans
+
+ for ch in range(0, np.shape(self.p_signal)[1]):
+ # Get the minimum and maximum (valid) storage values
+ dmin, dmax = digi_bounds(self.fmt[ch])
+ # add 1 because the lowest value is used to store nans
+ dmin = dmin + 1
dnan = dnans[ch]
-
+
pmin = minvals[ch]
pmax = maxvals[ch]
-
+
# map values using full digital range.
-
- # If the entire signal is nan, just put any.
+
+ # If the entire signal is nan, just put any.
if pmin == np.nan:
- gain = 1
+ gain = 1
baseline = 1
- # If the signal is just one value, store all values as digital 1.
+ # If the signal is just one value, store all values as digital 1.
elif pmin == pmax:
- if minval ==0:
+ if pmin == 0:
gain = 1
baseline = 1
else:
- gain = 1/minval # wait.. what if minval is 0...
- baseline = 0
+ gain = 1 / pmin
+ baseline = 0
+ # Regular mixed signal case
+ # Todo:
else:
-
- gain = (dmax-dmin) / (pmax - pmin)
- baseline = dmin - gain * pmin
-
- # What about roundoff error? Make sure values don't map to beyond range.
- baseline = int(baseline)
-
- # WFDB library limits...
+ gain = (dmax-dmin) / (pmax-pmin)
+ baseline = dmin - gain*pmin
+
+ # What about roundoff error? Make sure values don't map to beyond
+ # range.
+ baseline = int(baseline)
+
+ # WFDB library limits...
if abs(gain)>214748364 or abs(baseline)>2147483648:
- raise Exception('adcgain and baseline must have magnitudes < 214748364')
-
+ raise Exception('adc_gain magnitude must be < 214748364 and baseline magnitude must be < 2147483648')
+
gains.append(gain)
baselines.append(baseline)
-
+
return (gains, baselines)
- def convert_dtype(self, physical, returnres, smoothframes):
+ def convert_dtype(self, physical, return_res, smooth_frames):
if physical is True:
- returndtype = 'float'+str(returnres)
- if smoothframes is True:
- currentdtype = self.p_signals.dtype
+ returndtype = 'float'+str(return_res)
+ if smooth_frames is True:
+ currentdtype = self.p_signal.dtype
if currentdtype != returndtype:
- self.p_signals = self.p_signals.astype(returndtype, copy=False)
+ self.p_signal = self.p_signal.astype(returndtype, copy=False)
else:
- for ch in range(self.nsig):
- if self.e_p_signals[ch].dtype != returndtype:
- self.e_p_signals[ch] = self.e_p_signals[ch].astype(returndtype, copy=False)
+ for ch in range(self.n_sig):
+ if self.e_p_signal[ch].dtype != returndtype:
+ self.e_p_signal[ch] = self.e_p_signal[ch].astype(returndtype, copy=False)
else:
- returndtype = 'int'+str(returnres)
- if smoothframes is True:
- currentdtype = self.d_signals.dtype
+ returndtype = 'int'+str(return_res)
+ if smooth_frames is True:
+ currentdtype = self.d_signal.dtype
if currentdtype != returndtype:
# Do not allow changing integer dtype to lower value due to over/underflow
if int(str(currentdtype)[3:])>int(str(returndtype)[3:]):
raise Exception('Cannot convert digital samples to lower dtype. Risk of overflow/underflow.')
- self.d_signals = self.d_signals.astype(returndtype, copy=False)
+ self.d_signal = self.d_signal.astype(returndtype, copy=False)
else:
- for ch in range(self.nsig):
- currentdtype = self.e_d_signals[ch].dtype
+ for ch in range(self.n_sig):
+ currentdtype = self.e_d_signal[ch].dtype
if currentdtype != returndtype:
# Do not allow changing integer dtype to lower value due to over/underflow
if int(str(currentdtype)[3:])>int(str(returndtype)[3:]):
raise Exception('Cannot convert digital samples to lower dtype. Risk of overflow/underflow.')
- self.e_d_signals[ch] = self.e_d_signals[ch].astype(returndtype, copy=False)
+ self.e_d_signal[ch] = self.e_d_signal[ch].astype(returndtype, copy=False)
return
def calc_checksum(self, expanded=False):
"""
- Calculate the checksum(s) of the d_signals (expanded=False)
- or e_d_signals field (expanded=True)
+ Calculate the checksum(s) of the d_signal (expanded=False)
+ or e_d_signal field (expanded=True)
"""
if expanded:
- cs = [int(np.sum(self.e_d_signals[ch]) % 65536) for ch in range(self.nsig)]
+ cs = [int(np.sum(self.e_d_signal[ch]) % 65536) for ch in range(self.n_sig)]
else:
- cs = np.sum(self.d_signals, 0) % 65536
+ cs = np.sum(self.d_signal, 0) % 65536
cs = [int(c) for c in cs]
return cs
# Write each of the specified dat files
- def wrdatfiles(self, expanded=False):
+ def wr_dat_files(self, expanded=False, write_dir=''):
# Get the set of dat files to be written, and
- # the channels to be written to each file.
- filenames, datchannels = orderedsetlist(self.filename)
+ # the channels to be written to each file.
+ file_names, datchannels = orderedsetlist(self.file_name)
# Get the fmt and byte offset corresponding to each dat file
datfmts={}
datoffsets={}
- for fn in filenames:
+ for fn in file_names:
datfmts[fn] = self.fmt[datchannels[fn][0]]
- # byteoffset may not be present
- if self.byteoffset is None:
+ # byte_offset may not be present
+ if self.byte_offset is None:
datoffsets[fn] = 0
else:
- datoffsets[fn] = self.byteoffset[datchannels[fn][0]]
+ datoffsets[fn] = self.byte_offset[datchannels[fn][0]]
# Write the dat files
if expanded:
- for fn in filenames:
- wrdatfile(fn, datfmts[fn], None , datoffsets[fn], True, [self.e_d_signals[ch] for ch in datchannels[fn]], self.sampsperframe)
+ for fn in file_names:
+ wr_dat_file(fn, datfmts[fn], None, datoffsets[fn], True,
+ [self.e_d_signal[ch] for ch in datchannels[fn]],
+ self.samps_per_frame, write_dir=write_dir)
else:
# Create a copy to prevent overwrite
- dsig = self.d_signals.copy()
- for fn in filenames:
- wrdatfile(fn, datfmts[fn], dsig[:, datchannels[fn][0]:datchannels[fn][-1]+1], datoffsets[fn])
+ dsig = self.d_signal.copy()
+ for fn in file_names:
+ wr_dat_file(fn, datfmts[fn],
+ dsig[:, datchannels[fn][0]:datchannels[fn][-1]+1],
+ datoffsets[fn], write_dir=write_dir)
- def smoothframes(self, sigtype='physical'):
+ def smooth_frames(self, sigtype='physical'):
"""
Convert expanded signals with different samples/frame into
- a uniform numpy array.
-
+ a uniform numpy array.
+
Input parameters
- sigtype (default='physical'): Specifies whether to mooth
- the e_p_signals field ('physical'), or the e_d_signals
+ the e_p_signal field ('physical'), or the e_d_signal
field ('digital').
"""
- spf = self.sampsperframe[:]
+ spf = self.samps_per_frame[:]
for ch in range(len(spf)):
if spf[ch] is None:
spf[ch] = 1
@@ -585,29 +613,29 @@ def smoothframes(self, sigtype='physical'):
tspf = sum(spf)
if sigtype == 'physical':
- nsig = len(self.e_p_signals)
- siglen = int(len(self.e_p_signals[0])/spf[0])
- signal = np.zeros((siglen, nsig), dtype='float64')
+ n_sig = len(self.e_p_signal)
+ sig_len = int(len(self.e_p_signal[0])/spf[0])
+ signal = np.zeros((sig_len, n_sig), dtype='float64')
- for ch in range(nsig):
+ for ch in range(n_sig):
if spf[ch] == 1:
- signal[:, ch] = self.e_p_signals[ch]
+ signal[:, ch] = self.e_p_signal[ch]
else:
for frame in range(spf[ch]):
- signal[:, ch] += self.e_p_signals[ch][frame::spf[ch]]
+ signal[:, ch] += self.e_p_signal[ch][frame::spf[ch]]
signal[:, ch] = signal[:, ch] / spf[ch]
elif sigtype == 'digital':
- nsig = len(self.e_d_signals)
- siglen = int(len(self.e_d_signals[0])/spf[0])
- signal = np.zeros((siglen, nsig), dtype='int64')
+ n_sig = len(self.e_d_signal)
+ sig_len = int(len(self.e_d_signal[0])/spf[0])
+ signal = np.zeros((sig_len, n_sig), dtype='int64')
- for ch in range(nsig):
+ for ch in range(n_sig):
if spf[ch] == 1:
- signal[:, ch] = self.e_d_signals[ch]
+ signal[:, ch] = self.e_d_signal[ch]
else:
for frame in range(spf[ch]):
- signal[:, ch] += self.e_d_signals[ch][frame::spf[ch]]
+ signal[:, ch] += self.e_d_signal[ch][frame::spf[ch]]
signal[:, ch] = signal[:, ch] / spf[ch]
else:
raise ValueError("sigtype must be 'physical' or 'digital'")
@@ -617,60 +645,60 @@ def smoothframes(self, sigtype='physical'):
#------------------- Reading Signals -------------------#
-def rdsegment(filename, dirname, pbdir, nsig, fmt, siglen, byteoffset,
- sampsperframe, skew, sampfrom, sampto, channels,
- smoothframes, ignoreskew):
+def rd_segment(file_name, dirname, pb_dir, n_sig, fmt, sig_len, byte_offset,
+ samps_per_frame, skew, sampfrom, sampto, channels,
+ smooth_frames, ignore_skew):
"""
Read the samples from a single segment record's associated dat file(s)
- 'channels', 'sampfrom', 'sampto', 'smoothframes', and 'ignoreskew' are
+ 'channels', 'sampfrom', 'sampto', 'smooth_frames', and 'ignore_skew' are
user desired input fields.
All other input arguments are specifications of the segment
"""
# Avoid changing outer variables
- byteoffset = byteoffset[:]
- sampsperframe = sampsperframe[:]
+ byte_offset = byte_offset[:]
+ samps_per_frame = samps_per_frame[:]
skew = skew[:]
# Set defaults for empty fields
- for i in range(0, nsig):
- if byteoffset[i] == None:
- byteoffset[i] = 0
- if sampsperframe[i] == None:
- sampsperframe[i] = 1
+ for i in range(0, n_sig):
+ if byte_offset[i] == None:
+ byte_offset[i] = 0
+ if samps_per_frame[i] == None:
+ samps_per_frame[i] = 1
if skew[i] == None:
skew[i] = 0
# If skew is to be ignored, set all to 0
- if ignoreskew:
- skew = [0]*nsig
+ if ignore_skew:
+ skew = [0]*n_sig
# Get the set of dat files, and the
# channels that belong to each file.
- filename, datchannel = orderedsetlist(filename)
+ file_name, datchannel = orderedsetlist(file_name)
# Some files will not be read depending on input channels.
# Get the the wanted fields only.
- w_filename = [] # one scalar per dat file
+ w_file_name = [] # one scalar per dat file
w_fmt = {} # one scalar per dat file
- w_byteoffset = {} # one scalar per dat file
- w_sampsperframe = {} # one list per dat file
+ w_byte_offset = {} # one scalar per dat file
+ w_samps_per_frame = {} # one list per dat file
w_skew = {} # one list per dat file
w_channel = {} # one list per dat file
- for fn in filename:
- # intersecting dat channels between the input channels and the channels of the file
+ for fn in file_name:
+ # intersecting dat channels between the input channels and the channels of the file
idc = [c for c in datchannel[fn] if c in channels]
-
+
# There is at least one wanted channel in the dat file
if idc != []:
- w_filename.append(fn)
+ w_file_name.append(fn)
w_fmt[fn] = fmt[datchannel[fn][0]]
- w_byteoffset[fn] = byteoffset[datchannel[fn][0]]
- w_sampsperframe[fn] = [sampsperframe[c] for c in datchannel[fn]]
+ w_byte_offset[fn] = byte_offset[datchannel[fn][0]]
+ w_samps_per_frame[fn] = [samps_per_frame[c] for c in datchannel[fn]]
w_skew[fn] = [skew[c] for c in datchannel[fn]]
w_channel[fn] = idc
-
+
# Wanted dat channels, relative to the dat file itself
r_w_channel = {}
# The channels in the final output array that correspond to the read channels in each dat file
@@ -678,102 +706,102 @@ def rdsegment(filename, dirname, pbdir, nsig, fmt, siglen, byteoffset,
for fn in w_channel:
r_w_channel[fn] = [c - min(datchannel[fn]) for c in w_channel[fn]]
out_datchannel[fn] = [channels.index(c) for c in w_channel[fn]]
-
+
# Signals with multiple samples/frame are smoothed, or all signals have 1 sample/frame.
# Return uniform numpy array
- if smoothframes or sum(sampsperframe)==nsig:
+ if smooth_frames or sum(samps_per_frame)==n_sig:
# Figure out the largest required dtype for the segment to minimize memory usage
maxdtype = npdtype(wfdbfmtres(fmt, maxres=True), discrete=True)
# Allocate signal array. Minimize dtype
signals = np.zeros([sampto-sampfrom, len(channels)], dtype = maxdtype)
# Read each wanted dat file and store signals
- for fn in w_filename:
- signals[:, out_datchannel[fn]] = rddat(fn, dirname, pbdir, w_fmt[fn], len(datchannel[fn]),
- siglen, w_byteoffset[fn], w_sampsperframe[fn], w_skew[fn], sampfrom, sampto, smoothframes)[:, r_w_channel[fn]]
-
+ for fn in w_file_name:
+ signals[:, out_datchannel[fn]] = rddat(fn, dirname, pb_dir, w_fmt[fn], len(datchannel[fn]),
+ sig_len, w_byte_offset[fn], w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto, smooth_frames)[:, r_w_channel[fn]]
+
# Return each sample in signals with multiple samples/frame, without smoothing.
# Return a list of numpy arrays for each signal.
else:
signals=[None]*len(channels)
- for fn in w_filename:
- # Get the list of all signals contained in the dat file
- datsignals = rddat(fn, dirname, pbdir, w_fmt[fn], len(datchannel[fn]),
- siglen, w_byteoffset[fn], w_sampsperframe[fn], w_skew[fn], sampfrom, sampto, smoothframes)
+ for fn in w_file_name:
+ # Get the list of all signals contained in the dat file
+ datsignals = rddat(fn, dirname, pb_dir, w_fmt[fn], len(datchannel[fn]),
+ sig_len, w_byte_offset[fn], w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto, smooth_frames)
# Copy over the wanted signals
for cn in range(len(out_datchannel[fn])):
signals[out_datchannel[fn][cn]] = datsignals[r_w_channel[fn][cn]]
- return signals
+ return signals
-def rddat(filename, dirname, pbdir, fmt, nsig,
- siglen, byteoffset, sampsperframe,
- skew, sampfrom, sampto, smoothframes):
+def rddat(file_name, dirname, pb_dir, fmt, n_sig,
+ sig_len, byte_offset, samps_per_frame,
+ skew, sampfrom, sampto, smooth_frames):
"""
Get samples from a WFDB dat file.
- 'sampfrom', 'sampto', and smoothframes are user desired
- input fields. All other fields specify the file parameters.
+ 'sampfrom', 'sampto', and smooth_frames are user desired
+ input fields. All other fields specify the file parameters.
Returns all channels
Input arguments:
- - filename: The name of the dat file.
+ - file_name: The name of the dat file.
- dirname: The full directory where the dat file is located, if the dat file is local.
- - pbdir: The physiobank directory where the dat file is located, if the dat file is remote.
+ - pb_dir: The physiobank directory where the dat file is located, if the dat file is remote.
- fmt: The format of the dat file
- - nsig: The number of signals contained in the dat file
- - siglen : The signal length (per channel) of the dat file
- - byteoffset: The byte offsets of the dat file
- - sampsperframe: The samples/frame for the signals of the dat file
+ - n_sig: The number of signals contained in the dat file
+ - sig_len : The signal length (per channel) of the dat file
+ - byte_offset: The byte offsets of the dat file
+ - samps_per_frame: The samples/frame for the signals of the dat file
- skew: The skew for the signals of the dat file
- sampfrom: The starting sample number to be read from the signals
- sampto: The final sample number to be read from the signals
- - smoothframes: Whether to smooth channels with multiple samples/frame
+ - smooth_frames: Whether to smooth channels with multiple samples/frame
"""
# Total number of samples per frame
- tsampsperframe = sum(sampsperframe)
+ tsamps_per_frame = sum(samps_per_frame)
# The signal length to read (per channel)
readlen = sampto - sampfrom
# Calculate parameters used to read and process the dat file
- startbyte, nreadsamples, blockfloorsamples, extraflatsamples, nanreplace = calc_read_params(fmt, siglen, byteoffset,
- skew, tsampsperframe,
+ startbyte, nreadsamples, blockfloorsamples, extraflatsamples, nanreplace = calc_read_params(fmt, sig_len, byte_offset,
+ skew, tsamps_per_frame,
sampfrom, sampto)
# Number of bytes to be read from the dat file
totalreadbytes = requiredbytenum('read', fmt, nreadsamples)
-
+
# Total samples to be processed in intermediate step. Includes extra padded samples beyond dat file
totalprocesssamples = nreadsamples + extraflatsamples
# Total number of bytes to be processed in intermediate step.
totalprocessbytes = requiredbytenum('read', fmt, totalprocesssamples)
-
+
# Get the intermediate bytes or samples to process. Bit of a discrepancy. Recall special formats
# load uint8 bytes, other formats already load samples.
# Read values from dat file, and append bytes/samples if needed.
if extraflatsamples:
- if fmt in specialfmts:
+ if fmt in special_fmts:
# Extra number of bytes to append onto the bytes read from the dat file.
extrabytenum = totalprocessbytes - totalreadbytes
- sigbytes = np.concatenate((getdatbytes(filename, dirname, pbdir, fmt, startbyte, nreadsamples),
+ sigbytes = np.concatenate((getdatbytes(file_name, dirname, pb_dir, fmt, startbyte, nreadsamples),
np.zeros(extrabytenum, dtype = np.dtype(dataloadtypes[fmt]))))
else:
- sigbytes = np.concatenate((getdatbytes(filename, dirname, pbdir, fmt, startbyte, nreadsamples),
+ sigbytes = np.concatenate((getdatbytes(file_name, dirname, pb_dir, fmt, startbyte, nreadsamples),
np.zeros(extraflatsamples, dtype = np.dtype(dataloadtypes[fmt]))))
else:
- sigbytes = getdatbytes(filename, dirname, pbdir, fmt, startbyte, nreadsamples)
+ sigbytes = getdatbytes(file_name, dirname, pb_dir, fmt, startbyte, nreadsamples)
# Continue to process the read values into proper samples
# For special fmts, Turn the bytes into actual samples
- if fmt in specialfmts:
+ if fmt in special_fmts:
sigbytes = bytes2samples(sigbytes, totalprocesssamples, fmt)
# Remove extra leading sample read within the byte block if any
if blockfloorsamples:
@@ -788,54 +816,54 @@ def rddat(filename, dirname, pbdir, fmt, nsig,
# final samples.
# No extra samples/frame. Obtain original uniform numpy array
- if tsampsperframe==nsig:
+ if tsamps_per_frame==n_sig:
# Reshape into multiple channels
- sig = sigbytes.reshape(-1, nsig)
+ sig = sigbytes.reshape(-1, n_sig)
# Skew the signal
- sig = skewsig(sig, skew, nsig, readlen, fmt, nanreplace)
+ sig = skewsig(sig, skew, n_sig, readlen, fmt, nanreplace)
# Extra frames present to be smoothed. Obtain averaged uniform numpy array
- elif smoothframes:
+ elif smooth_frames:
# Allocate memory for smoothed signal.
- sig = np.zeros((int(len(sigbytes)/tsampsperframe) , nsig), dtype=sigbytes.dtype)
+ sig = np.zeros((int(len(sigbytes)/tsamps_per_frame) , n_sig), dtype=sigbytes.dtype)
# Transfer and average samples
- for ch in range(nsig):
- if sampsperframe[ch] == 1:
- sig[:, ch] = sigbytes[sum(([0] + sampsperframe)[:ch + 1])::tsampsperframe]
+ for ch in range(n_sig):
+ if samps_per_frame[ch] == 1:
+ sig[:, ch] = sigbytes[sum(([0] + samps_per_frame)[:ch + 1])::tsamps_per_frame]
else:
if ch == 0:
startind = 0
else:
- startind = np.sum(sampsperframe[:ch])
- sig[:,ch] = [np.average(sigbytes[ind:ind+sampsperframe[ch]]) for ind in range(startind,len(sigbytes),tsampsperframe)]
+ startind = np.sum(samps_per_frame[:ch])
+ sig[:,ch] = [np.average(sigbytes[ind:ind+samps_per_frame[ch]]) for ind in range(startind,len(sigbytes),tsamps_per_frame)]
# Skew the signal
- sig = skewsig(sig, skew, nsig, readlen, fmt, nanreplace)
+ sig = skewsig(sig, skew, n_sig, readlen, fmt, nanreplace)
# Extra frames present without wanting smoothing. Return all expanded samples.
else:
# List of 1d numpy arrays
sig=[]
# Transfer over samples
- for ch in range(nsig):
+ for ch in range(n_sig):
# Indices of the flat signal that belong to the channel
- ch_indices = np.concatenate([np.array(range(sampsperframe[ch])) + sum([0]+sampsperframe[:ch]) + tsampsperframe*framenum for framenum in range(int(len(sigbytes)/tsampsperframe))])
+ ch_indices = np.concatenate([np.array(range(samps_per_frame[ch])) + sum([0]+samps_per_frame[:ch]) + tsamps_per_frame*framenum for framenum in range(int(len(sigbytes)/tsamps_per_frame))])
sig.append(sigbytes[ch_indices])
# Skew the signal
- sig = skewsig(sig, skew, nsig, readlen, fmt, nanreplace, sampsperframe)
+ sig = skewsig(sig, skew, n_sig, readlen, fmt, nanreplace, samps_per_frame)
# Integrity check of signal shape after reading
- checksigdims(sig, readlen, nsig, sampsperframe)
+ checksigdims(sig, readlen, n_sig, samps_per_frame)
return sig
-def calc_read_params(fmt, siglen, byteoffset, skew, tsampsperframe, sampfrom, sampto):
+def calc_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame, sampfrom, sampto):
"""
Calculate parameters used to read and process the dat file
-
+
Output arguments:
- - startbyte - The starting byte to read the dat file from. Always points to the start of a
+ - startbyte - The starting byte to read the dat file from. Always points to the start of a
byte block for special formats.
- nreadsamples - The number of flat samples to read from the dat file.
- blockfloorsamples - The extra samples read prior to the first desired sample, for special
@@ -846,7 +874,7 @@ def calc_read_params(fmt, siglen, byteoffset, skew, tsampsperframe, sampfrom, sa
Example Parameters:
- siglen=100, t = 4 (total samples/frame), skew = [0, 2, 4, 5]
+ sig_len=100, t = 4 (total samples/frame), skew = [0, 2, 4, 5]
sampfrom=0, sampto=100 --> readlen = 100, nsampread = 100*t, extralen = 5, nanreplace = [0, 2, 4, 5]
sampfrom=50, sampto=100 --> readlen = 50, nsampread = 50*t, extralen = 5, nanreplace = [0, 2, 4, 5]
sampfrom=0, sampto=50 --> readlen = 50, nsampread = 55*t, extralen = 0, nanreplace = [0, 0, 0, 0]
@@ -854,19 +882,19 @@ def calc_read_params(fmt, siglen, byteoffset, skew, tsampsperframe, sampfrom, sa
"""
# First flat sample number to read (if all channels were flattened)
- startflatsample = sampfrom * tsampsperframe
-
- #endflatsample = min((sampto + max(skew)-sampfrom), siglen) * tsampsperframe
+ startflatsample = sampfrom * tsamps_per_frame
+
+ #endflatsample = min((sampto + max(skew)-sampfrom), sig_len) * tsamps_per_frame
# Calculate the last flat sample number to read.
- # Cannot exceed siglen * tsampsperframe, the number of samples stored in the file.
+ # Cannot exceed sig_len * tsamps_per_frame, the number of samples stored in the file.
# If extra 'samples' are desired by the skew, keep track.
# Where was the -sampfrom derived from? Why was it in the formula?
- if (sampto + max(skew))>siglen:
- endflatsample = siglen*tsampsperframe
- extraflatsamples = (sampto + max(skew) - siglen) * tsampsperframe
+ if (sampto + max(skew))>sig_len:
+ endflatsample = sig_len*tsamps_per_frame
+ extraflatsamples = (sampto + max(skew) - sig_len) * tsamps_per_frame
else:
- endflatsample = (sampto + max(skew)) * tsampsperframe
+ endflatsample = (sampto + max(skew)) * tsamps_per_frame
extraflatsamples = 0
# Adjust the starting sample number to read from start of blocks for special fmts.
@@ -883,7 +911,7 @@ def calc_read_params(fmt, siglen, byteoffset, skew, tsampsperframe, sampfrom, sa
blockfloorsamples = 0
# The starting byte to read from
- startbyte = byteoffset + int(startflatsample * bytespersample[fmt])
+ startbyte = byte_offset + int(startflatsample * bytespersample[fmt])
# The number of samples to read
nreadsamples = endflatsample - startflatsample
@@ -891,8 +919,8 @@ def calc_read_params(fmt, siglen, byteoffset, skew, tsampsperframe, sampfrom, sa
# The number of samples to replace with nan at the end of each signal
# due to skew wanting samples beyond the file
- # Calculate this using the above statement case: if (sampto + max(skew))>siglen:
- nanreplace = [max(0, sampto + s - siglen) for s in skew]
+ # Calculate this using the above statement case: if (sampto + max(skew))>sig_len:
+ nanreplace = [max(0, sampto + s - sig_len) for s in skew]
return (startbyte, nreadsamples, blockfloorsamples, extraflatsamples, nanreplace)
@@ -934,13 +962,13 @@ def requiredbytenum(mode, fmt, nsamp):
return int(nbytes)
-def getdatbytes(filename, dirname, pbdir, fmt, startbyte, nsamp):
+def getdatbytes(file_name, dirname, pb_dir, fmt, startbyte, nsamp):
"""
Read bytes from a dat file, either local or remote, into a numpy array.
- Slightly misleading function name. Does not return bytes object.
+ Slightly misleading function name. Does not return bytes object.
Output argument dtype varies depending on fmt. Non-special fmts are
read in their final required format. Special format are read as uint8.
-
+
Input arguments:
- nsamp: The total number of samples to read. Does NOT need to create whole blocks
for special format. Any number of samples should be readable. But see below*.
@@ -965,8 +993,8 @@ def getdatbytes(filename, dirname, pbdir, fmt, startbyte, nsamp):
bytecount = nsamp*bytespersample[fmt]
# Local dat file
- if pbdir is None:
- fp = open(os.path.join(dirname, filename), 'rb')
+ if pb_dir is None:
+ fp = open(os.path.join(dirname, file_name), 'rb')
fp.seek(startbyte)
# Read file using corresponding dtype
@@ -977,7 +1005,7 @@ def getdatbytes(filename, dirname, pbdir, fmt, startbyte, nsamp):
# Stream dat file from physiobank
# Same output as above np.fromfile.
else:
- sigbytes = downloads.streamdat(filename, pbdir, fmt, bytecount, startbyte, dataloadtypes)
+ sigbytes = download.stream_dat(file_name, pb_dir, fmt, bytecount, startbyte, dataloadtypes)
return sigbytes
@@ -999,7 +1027,7 @@ def bytes2samples(sigbytes, nsamp, fmt):
sig = np.zeros(nsamp, dtype='int16')
# One sample pair is stored in one byte triplet.
-
+
# Even numbered samples
sig[0::2] = sigbytes[0::3] + 256 * np.bitwise_and(sigbytes[1::3], 0x0f)
# Odd numbered samples (len(sig) always >1 due to processing of whole blocks)
@@ -1009,7 +1037,7 @@ def bytes2samples(sigbytes, nsamp, fmt):
if addedsamps:
sig = sig[:-addedsamps]
- # Loaded values as unsigned. Convert to 2's complement form:
+ # Loaded values as unsigned. Convert to 2's complement form:
# values > 2^11-1 are negative.
sig[sig > 2047] -= 4096
@@ -1037,10 +1065,10 @@ def bytes2samples(sigbytes, nsamp, fmt):
if addedsamps:
sig = sig[:-addedsamps]
- # Loaded values as unsigned. Convert to 2's complement form:
+ # Loaded values as unsigned. Convert to 2's complement form:
# values > 2^9-1 are negative.
sig[sig > 511] -= 1024
-
+
elif fmt == '311':
# Easier to process when dealing with whole blocks
if nsamp % 3:
@@ -1060,68 +1088,68 @@ def bytes2samples(sigbytes, nsamp, fmt):
sig[1::3] = (sigbytes[1::4] >> 2)[0:len(sig[1::3])] + 64 * np.bitwise_and(sigbytes[2::4], 0x0f)[0:len(sig[1::3])]
# Third sample is 4 msb of third byte and 6 msb of forth byte
sig[2::3] = (sigbytes[2::4] >> 4)[0:len(sig[2::3])] + 16 * np.bitwise_and(sigbytes[3::4], 0x7f)[0:len(sig[2::3])]
-
+
# Remove trailing samples read within the byte block if originally not 3n sampled
if addedsamps:
sig = sig[:-addedsamps]
- # Loaded values as unsigned. Convert to 2's complement form:
+ # Loaded values as unsigned. Convert to 2's complement form:
# values > 2^9-1 are negative.
sig[sig > 511] -= 1024
return sig
-def skewsig(sig, skew, nsig, readlen, fmt, nanreplace, sampsperframe=None):
+def skewsig(sig, skew, n_sig, readlen, fmt, nanreplace, samps_per_frame=None):
"""
Skew the signal, insert nans and shave off end of array if needed.
fmt is just for the correct nan value.
- sampsperframe is only used for skewing expanded signals.
+ samps_per_frame is only used for skewing expanded signals.
"""
if max(skew)>0:
- # Expanded frame samples. List of arrays.
+ # Expanded frame samples. List of arrays.
if isinstance(sig, list):
# Shift the channel samples
- for ch in range(nsig):
+ for ch in range(n_sig):
if skew[ch]>0:
- sig[ch][:readlen*sampsperframe[ch]] = sig[ch][skew[ch]*sampsperframe[ch]:]
+ sig[ch][:readlen*samps_per_frame[ch]] = sig[ch][skew[ch]*samps_per_frame[ch]:]
# Shave off the extra signal length at the end
- for ch in range(nsig):
- sig[ch] = sig[ch][:readlen*sampsperframe[ch]]
+ for ch in range(n_sig):
+ sig[ch] = sig[ch][:readlen*samps_per_frame[ch]]
# Insert nans where skewed signal overran dat file
- for ch in range(nsig):
+ for ch in range(n_sig):
if nanreplace[ch]>0:
sig[ch][-nanreplace[ch]:] = digi_nan(fmt)
# Uniform array
else:
# Shift the channel samples
- for ch in range(nsig):
+ for ch in range(n_sig):
if skew[ch]>0:
sig[:readlen, ch] = sig[skew[ch]:, ch]
# Shave off the extra signal length at the end
sig = sig[:readlen, :]
# Insert nans where skewed signal overran dat file
- for ch in range(nsig):
+ for ch in range(n_sig):
if nanreplace[ch]>0:
sig[-nanreplace[ch]:, ch] = digi_nan(fmt)
return sig
-
+
# Integrity check of signal shape after reading
-def checksigdims(sig, readlen, nsig, sampsperframe):
+def checksigdims(sig, readlen, n_sig, samps_per_frame):
if isinstance(sig, np.ndarray):
- if sig.shape != (readlen, nsig):
+ if sig.shape != (readlen, n_sig):
raise ValueError('Samples were not loaded correctly')
else:
- if len(sig) != nsig:
+ if len(sig) != n_sig:
raise ValueError('Samples were not loaded correctly')
- for ch in range(nsig):
- if len(sig[ch]) != sampsperframe[ch] * readlen:
+ for ch in range(n_sig):
+ if len(sig[ch]) != samps_per_frame[ch] * readlen:
raise ValueError('Samples were not loaded correctly')
@@ -1157,15 +1185,15 @@ def digi_bounds(fmt):
return (-8388608, 8388607)
elif fmt == '32':
return (-2147483648, 2147483647)
-
-# Return nan value for the format type(s).
+
+# Return nan value for the format type(s).
def digi_nan(fmt):
if isinstance(fmt, list):
diginans = []
for f in fmt:
diginans.append(digi_nan(f))
return diginans
-
+
if fmt == '80':
return -128
if fmt == '310':
@@ -1186,65 +1214,80 @@ def digi_nan(fmt):
return -2147483648
-
-reslevels = np.power(2, np.arange(0,33))
-def estres(signals):
+def est_res(signals):
"""
- def estres(signals):
-
- Estimate the resolution of each signal in a multi-channel signal in bits. Maximum of 32 bits.
- Input arguments:
- - signals: A 2d numpy array representing a uniform multichannel signal, or a list of 1d numpy arrays
- representing multiple channels of signals with different numbers of samples per frame.
+ def est_res(signals):
+
+ Estimate the resolution of each signal in a multi-channel signal in
+ bits. Maximum of 32 bits.
+
+ Parameters
+ ----------
+ signals : numpy array, or list
+ A 2d numpy array representing a uniform multichannel signal, or
+ a list of 1d numpy arrays representing multiple channels of
+ signals with different numbers of samples per frame.
+
+ Returns
+ -------
+ res : list
+ A list of estimated integer resolutions for each channel
"""
-
- # Expanded sample signals. List of numpy arrays
+ res_levels = np.power(2, np.arange(0,33))
+ # Expanded sample signals. List of numpy arrays
if isinstance(signals, list):
- nsig = len(signals)
+ n_sig = len(signals)
# Uniform numpy array
else:
if signals.ndim ==1:
- nsig = 1
+ n_sig = 1
else:
- nsig = signals.shape[1]
+ n_sig = signals.shape[1]
res = []
-
- for ch in range(nsig):
- # Estimate the number of steps as the range divided by the minimum increment.
+
+ for ch in range(n_sig):
+ # Estimate the number of steps as the range divided by the
+ # minimum increment.
if isinstance(signals, list):
sortedsig = np.sort(signals[ch])
else:
sortedsig = np.sort(signals[:,ch])
-
+
min_inc = min(np.diff(sortedsig))
-
+
if min_inc == 0:
- # Case where signal is flat. Resolution is 0.
+ # Case where signal is flat. Resolution is 0.
res.append(0)
else:
nlevels = 1 + (sortedsig[-1]-sortedsig[0])/min_inc
- if nlevels>=reslevels[-1]:
+ if nlevels>=res_levels[-1]:
res.append(32)
else:
- res.append(np.where(reslevels>=nlevels)[0][0])
-
+ res.append(np.where(res_levels>=nlevels)[0][0])
+
return res
-# Return the most suitable wfdb format(s) to use given signal resolutions.
-# If singlefmt is True, the format for the maximum resolution will be returned.
-def wfdbfmt(res, singlefmt = True):
+def wfdbfmt(res, single_fmt=True):
+ """
+ Return the most suitable wfdb format(s) to use given signal
+ resolutions.
+ If single_fmt is True, the format for the maximum resolution will be returned.
+
+ Parameters
+
+ """
if isinstance(res, list):
# Return a single format
- if singlefmt is True:
+ if single_fmt:
res = [max(res)]*len(res)
fmts = []
for r in res:
fmts.append(wfdbfmt(r))
return fmts
-
+
if res<=8:
return '80'
elif res<=12:
@@ -1283,72 +1326,73 @@ def wfdbfmtres(fmt, maxres=False):
# Given the resolution of a signal, return the minimum
# dtype to store it
def npdtype(res, discrete):
-
+
if not hasattr(res, '__index__') or res>64:
raise TypeError('res must be integer based and <=64')
-
+
for npres in [8, 16, 32, 64]:
if res<=npres:
break
-
+
if discrete is True:
return 'int'+str(npres)
else:
return 'float'+str(npres)
-# Write a dat file.
-# All bytes are written one at a time
-# to avoid endianness issues.
-def wrdatfile(filename, fmt, d_signals, byteoffset, expanded=False, e_d_signals=None, sampsperframe=None):
- f=open(filename,'wb')
+def wr_dat_file(file_name, fmt, d_signal, byte_offset, expanded=False,
+ e_d_signal=None, samps_per_frame=None, write_dir=''):
+ """
+ Write a dat file. All bytes are written one at a time to avoid
+ endianness issues.
+
+ """
# Combine list of arrays into single array
if expanded:
- nsig = len(e_d_signals)
- siglen = int(len(e_d_signals[0])/sampsperframe[0])
+ n_sig = len(e_d_signal)
+ sig_len = int(len(e_d_signal[0])/samps_per_frame[0])
# Effectively create MxN signal, with extra frame samples acting like extra channels
- d_signals = np.zeros((siglen, sum(sampsperframe)), dtype = 'int64')
+ d_signal = np.zeros((sig_len, sum(samps_per_frame)), dtype = 'int64')
# Counter for channel number
expand_ch = 0
- for ch in range(nsig):
- spf = sampsperframe[ch]
+ for ch in range(n_sig):
+ spf = samps_per_frame[ch]
for framenum in range(spf):
- d_signals[:, expand_ch] = e_d_signals[ch][framenum::spf]
+ d_signal[:, expand_ch] = e_d_signal[ch][framenum::spf]
expand_ch = expand_ch + 1
-
- # This nsig is used for making list items.
+
+ # This n_sig is used for making list items.
# Does not necessarily represent number of signals (ie. for expanded=True)
- nsig = d_signals.shape[1]
+ n_sig = d_signal.shape[1]
if fmt == '80':
# convert to 8 bit offset binary form
- d_signals = d_signals + 128
+ d_signal = d_signal + 128
# Concatenate into 1D
- d_signals = d_signals.reshape(-1)
- # Convert to unsigned 8 bit dtype to write
- bwrite = d_signals.astype('uint8')
+ d_signal = d_signal.reshape(-1)
+ # Convert to unsigned 8 bit dtype to write
+ bwrite = d_signal.astype('uint8')
elif fmt == '212':
+ # Each sample is represented by a 12 bit two's complement amplitude.
+ # The first sample is obtained from the 12 least significant bits of the first byte pair (stored least significant byte first).
+ # The second sample is formed from the 4 remaining bits of the first byte pair (which are the 4 high bits of the 12-bit sample)
+ # and the next byte (which contains the remaining 8 bits of the second sample).
+ # The process is repeated for each successive pair of samples.
- # Each sample is represented by a 12 bit two's complement amplitude.
- # The first sample is obtained from the 12 least significant bits of the first byte pair (stored least significant byte first).
- # The second sample is formed from the 4 remaining bits of the first byte pair (which are the 4 high bits of the 12-bit sample)
- # and the next byte (which contains the remaining 8 bits of the second sample).
- # The process is repeated for each successive pair of samples.
+ # convert to 12 bit two's complement
+ d_signal[d_signal<0] = d_signal[d_signal<0] + 4096
- # convert to 12 bit two's complement
- d_signals[d_signals<0] = d_signals[d_signals<0] + 4096
-
# Concatenate into 1D
- d_signals = d_signals.reshape(-1)
+ d_signal = d_signal.reshape(-1)
- nsamp = len(d_signals)
+ nsamp = len(d_signal)
# use this for byte processing
processnsamp = nsamp
- # Odd numbered number of samples. Fill in extra blank for following byte calculation.
+ # Odd numbered number of samples. Fill in extra blank for following byte calculation.
if processnsamp % 2:
- d_signals = np.concatenate([d_signals, np.array([0])])
+ d_signal = np.concatenate([d_signal, np.array([0])])
processnsamp +=1
# The individual bytes to write
@@ -1357,78 +1401,83 @@ def wrdatfile(filename, fmt, d_signals, byteoffset, expanded=False, e_d_signals=
# Fill in the byte triplets
# Triplet 1 from lowest 8 bits of sample 1
- bwrite[0::3] = d_signals[0::2] & 255
+ bwrite[0::3] = d_signal[0::2] & 255
# Triplet 2 from highest 4 bits of samples 1 (lower) and 2 (upper)
- bwrite[1::3] = ((d_signals[0::2] & 3840) >> 8) + ((d_signals[1::2] & 3840) >> 4)
+ bwrite[1::3] = ((d_signal[0::2] & 3840) >> 8) + ((d_signal[1::2] & 3840) >> 4)
# Triplet 3 from lowest 8 bits of sample 2
- bwrite[2::3] = d_signals[1::2] & 255
+ bwrite[2::3] = d_signal[1::2] & 255
# If we added an extra sample for byte calculation, remove the last byte (don't write)
if nsamp % 2:
bwrite = bwrite[:-1]
elif fmt == '16':
- # convert to 16 bit two's complement
- d_signals[d_signals<0] = d_signals[d_signals<0] + 65536
+ # convert to 16 bit two's complement
+ d_signal[d_signal<0] = d_signal[d_signal<0] + 65536
# Split samples into separate bytes using binary masks
- b1 = d_signals & [255]*nsig
- b2 = ( d_signals & [65280]*nsig ) >> 8
- # Interweave the bytes so that the same samples' bytes are consecutive
+ b1 = d_signal & [255]*n_sig
+ b2 = ( d_signal & [65280]*n_sig ) >> 8
+ # Interweave the bytes so that the same samples' bytes are consecutive
b1 = b1.reshape((-1, 1))
b2 = b2.reshape((-1, 1))
bwrite = np.concatenate((b1, b2), axis=1)
bwrite = bwrite.reshape((1,-1))[0]
- # Convert to unsigned 8 bit dtype to write
+ # Convert to unsigned 8 bit dtype to write
bwrite = bwrite.astype('uint8')
elif fmt == '24':
- # convert to 24 bit two's complement
- d_signals[d_signals<0] = d_signals[d_signals<0] + 16777216
+ # convert to 24 bit two's complement
+ d_signal[d_signal<0] = d_signal[d_signal<0] + 16777216
# Split samples into separate bytes using binary masks
- b1 = d_signals & [255]*nsig
- b2 = ( d_signals & [65280]*nsig ) >> 8
- b3 = ( d_signals & [16711680]*nsig ) >> 16
- # Interweave the bytes so that the same samples' bytes are consecutive
+ b1 = d_signal & [255]*n_sig
+ b2 = ( d_signal & [65280]*n_sig ) >> 8
+ b3 = ( d_signal & [16711680]*n_sig ) >> 16
+ # Interweave the bytes so that the same samples' bytes are consecutive
b1 = b1.reshape((-1, 1))
b2 = b2.reshape((-1, 1))
b3 = b3.reshape((-1, 1))
bwrite = np.concatenate((b1, b2, b3), axis=1)
bwrite = bwrite.reshape((1,-1))[0]
- # Convert to unsigned 8 bit dtype to write
+ # Convert to unsigned 8 bit dtype to write
bwrite = bwrite.astype('uint8')
-
+
elif fmt == '32':
- # convert to 32 bit two's complement
- d_signals[d_signals<0] = d_signals[d_signals<0] + 4294967296
+ # convert to 32 bit two's complement
+ d_signal[d_signal<0] = d_signal[d_signal<0] + 4294967296
# Split samples into separate bytes using binary masks
- b1 = d_signals & [255]*nsig
- b2 = ( d_signals & [65280]*nsig ) >> 8
- b3 = ( d_signals & [16711680]*nsig ) >> 16
- b4 = ( d_signals & [4278190080]*nsig ) >> 24
- # Interweave the bytes so that the same samples' bytes are consecutive
+ b1 = d_signal & [255]*n_sig
+ b2 = ( d_signal & [65280]*n_sig ) >> 8
+ b3 = ( d_signal & [16711680]*n_sig ) >> 16
+ b4 = ( d_signal & [4278190080]*n_sig ) >> 24
+ # Interweave the bytes so that the same samples' bytes are consecutive
b1 = b1.reshape((-1, 1))
b2 = b2.reshape((-1, 1))
b3 = b3.reshape((-1, 1))
b4 = b4.reshape((-1, 1))
bwrite = np.concatenate((b1, b2, b3, b4), axis=1)
bwrite = bwrite.reshape((1,-1))[0]
- # Convert to unsigned 8 bit dtype to write
+ # Convert to unsigned 8 bit dtype to write
bwrite = bwrite.astype('uint8')
else:
raise ValueError('This library currently only supports writing the following formats: 80, 16, 24, 32')
-
+
# Byte offset in the file
- if byteoffset is not None and byteoffset>0:
- print('Writing file '+filename+' with '+str(byteoffset)+' empty leading bytes')
- bwrite = np.append(np.zeros(byteoffset, dtype = 'uint8'), bwrite)
+ if byte_offset is not None and byte_offset>0:
+ print('Writing file '+file_name+' with '+str(byte_offset)+' empty leading bytes')
+ bwrite = np.append(np.zeros(byte_offset, dtype = 'uint8'), bwrite)
+
+ f=open(os.path.join(write_dir, file_name),'wb')
# Write the file
bwrite.tofile(f)
f.close()
-# Returns the unique elements in a list in the order that they appear.
-# Also returns the indices of the original list that correspond to each output element.
+
def orderedsetlist(fulllist):
+ """
+ Returns the unique elements in a list in the order that they appear.
+ Also returns the indices of the original list that correspond to each output element.
+ """
uniquelist = []
original_inds = {}
@@ -1444,10 +1493,16 @@ def orderedsetlist(fulllist):
return uniquelist, original_inds
-# Round down to nearest
+
def downround(x, base):
+ """
+ Round down to nearest multiple of base.
+ """
return base * math.floor(float(x)/base)
-# Round up to nearest
+
def upround(x, base):
+ """
+ Round up to nearest multiple of base.
+ """
return base * math.ceil(float(x)/base)
diff --git a/wfdb/readwrite/annotations.py b/wfdb/io/annotation.py
similarity index 77%
rename from wfdb/readwrite/annotations.py
rename to wfdb/io/annotation.py
index b4712e93..bb7981f2 100644
--- a/wfdb/readwrite/annotations.py
+++ b/wfdb/io/annotation.py
@@ -1,60 +1,83 @@
+import copy
import numpy as np
+import os
import pandas as pd
import re
-import os
-import copy
-from . import records
-from . import _headers
-from . import downloads
-# Class for WFDB annotations
+from . import download
+from . import _header
+from . import record
+
+
class Annotation(object):
"""
- The class representing WFDB annotations.
-
- Annotation objects can be created using the constructor, or by reading a WFDB annotation
- file with 'rdann'.
-
- The attributes of the Annotation object give information about the annotation as specified
- by https://www.physionet.org/physiotools/wag/annot-5.htm:
- - recordname: The base file name (without extension) of the record that the annotation
- is attached to.
- - extension: The file extension of the file the annotation is stored in.
- - sample: The annotation locations in samples relative to the beginning of the record.
- - symbol: The symbol used to display each annotation label.
- - subtype: The marked class/category of each annotation.
- - chan: The signal channel associated with each annotations.
- - num: The labelled annotation number for each annotation.
- - aux_note: The auxiliary information string for each annotation.
- - fs: The sampling frequency of the record, if available.
- - label_store: The integer value used to store/encode each annotation label
- - description: The descriptive string of each annotation label
- - custom_labels: The custom annotation labels defined in the annotation file.
- Maps the relationship between the three label fields.
- The data type is a pandas DataFrame with three columns: ['label_store', 'symbol', 'description']
- - contained_labels: The unique labels contained in this annotation. Same structure
- as custom_labels.
-
- Constructor function:
- def __init__(self, recordname, extension, sample, symbol=None, subtype=None,
- chan=None, num=None, aux_note=None, fs=None, label_store=None,
- description=None, custom_labels=None, contained_labels=None)
-
- Call 'show_ann_labels()' to see the list of standard annotation codes. Any text used to label
- annotations that are not one of these codes should go in the 'aux_note' field rather than the
- 'sym' field.
-
- Example usage:
- import wfdb
- ann1 = wfdb.Annotation(recordname='rec1', extension='atr', sample=[10,20,400],
- symbol = ['N','N','['], aux_note=[None, None, 'Serious Vfib'])
+ The class representing WFDB annotations.
+
+ Annotation objects can be created using the initializer, or by reading a
+ WFDB annotation file with `rdann`.
+
+ The attributes of the Annotation object give information about the
+ annotation as specified by:
+ https://www.physionet.org/physiotools/wag/annot-5.htm
+
+ Call `show_ann_labels()` to see the list of standard annotation codes. Any
+ text used to label annotations that are not one of these codes should go in
+ the 'aux_note' field rather than the 'sym' field.
+
+ Examples
+ --------
+ >>> ann1 = wfdb.Annotation(record_name='rec1', extension='atr',
+ sample=[10,20,400], symbol=['N','N','['],
+ aux_note=[None, None, 'Serious Vfib'])
+
"""
-
- def __init__(self, recordname, extension, sample, symbol=None, subtype=None,
- chan=None, num=None, aux_note=None, fs=None, label_store=None,
- description=None, custom_labels=None, contained_labels=None):
- self.recordname = recordname
+ def __init__(self, record_name, extension, sample, symbol=None,
+ subtype=None, chan=None, num=None, aux_note=None, fs=None,
+ label_store=None, description=None, custom_labels=None,
+ contained_labels=None):
+ """
+ Parameters
+ ----------
+ record_name : str
+ The base file name (without extension) of the record that the
+ annotation is associated with.
+ extension : str
+ The file extension of the file the annotation is stored in.
+ sample : numpy array
+ A numpy array containing the annotation locations in samples relative to
+ the beginning of the record.
+ symbol : list, or numpy array, optional
+ The symbols used to display the annotation labels. List or numpy array.
+ If this field is present, `label_store` must not be present.
+ subtype : numpy array, optional
+ A numpy array containing the marked class/category of each annotation.
+ chan : numpy array, optional
+ A numpy array containing the signal channel associated with each
+ annotation.
+ num : numpy array, optional
+ A numpy array containing the labelled annotation number for each
+ annotation.
+ aux_note : list, optional
+ A list containing the auxiliary information string (or None for
+ annotations without notes) for each annotation.
+ fs : int, or float, optional
+ The sampling frequency of the record.
+ label_store : numpy array, optional
+ The integer value used to store/encode each annotation label
+ description : list, optional
+ A list containing the descriptive string of each annotation label.
+ custom_labels : pandas dataframe, optional
+ The custom annotation labels defined in the annotation file. Maps
+ the relationship between the three label fields. The data type is a
+ pandas DataFrame with three columns:
+ ['label_store', 'symbol', 'description']
+ contained_labels : pandas dataframe, optional
+ The unique labels contained in this annotation. Same structure as
+ `custom_labels`.
+
+ """
+ self.record_name = record_name
self.extension = extension
self.sample = sample
@@ -72,6 +95,8 @@ def __init__(self, recordname, extension, sample, symbol=None, subtype=None,
self.custom_labels = custom_labels
self.contained_labels = contained_labels
+ self.ann_len = len(self.sample)
+
#__label_map__: (storevalue, symbol, description) hidden attribute
# Equal comparison operator for objects of this type
@@ -106,19 +131,36 @@ def __eq__(self, other):
return True
- # Write an annotation file
- def wrann(self, writefs=False):
+ def apply_range(self, sampfrom=0, sampto=None):
+ """
+ Filter the annotation attributes to keep only items between the
+ desired sample values
+
+ """
+ sampto = sampto or self.sample[-1]
+
+ kept_inds = np.intersect1d(np.where(self.sample>=sampfrom),
+ np.where(self.sample<=sampto))
+
+
+ for field in ['sample', 'label_store', 'subtype', 'chan', 'num']:
+ setattr(self, field, getattr(self, field)[kept_inds])
+
+ self.aux_note = [self.aux_note[i] for i in kept_inds]
+
+ self.ann_len = len(self.sample)
+
+ def wrann(self, write_fs=False, write_dir=''):
"""
- Instance method to write a WFDB annotation file from an Annotation object.
-
- def wrann(self, writefs=False)
-
- Input Parameters:
- - writefs (default=False): Flag specifying whether to write the fs
- attribute to the file.
+ Write a WFDB annotation file from this object.
+
+ Parameters
+ ----------
+ write_fs : bool, optional
+ Whether to write the `fs` attribute to the file.
"""
- for field in ['recordname', 'extension']:
+ for field in ['record_name', 'extension']:
if getattr(self, field) is None:
raise Exception('Missing required field for writing annotation file: ',field)
@@ -127,7 +169,7 @@ def wrann(self, writefs=False)
raise Exception('At least one annotation label field is required to write the annotation: ', ann_label_fields)
# Check the validity of individual fields
- self.checkfields()
+ self.check_fields()
# Standardize the format of the custom_labels field
self.standardize_custom_labels()
@@ -137,16 +179,17 @@ def wrann(self, writefs=False)
# Check the cohesion of fields
self.check_field_cohesion(present_label_fields)
-
+
# Calculate the label_store field if necessary
if 'label_store' not in present_label_fields:
- self.convert_label_attribute(source_field=present_label_fields[0], target_field='label_store')
+ self.convert_label_attribute(source_field=present_label_fields[0],
+ target_field='label_store')
# Write the header file using the specified fields
- self.wrannfile(writefs=writefs)
+ self.wr_ann_file(write_fs=write_fs, write_dir=write_dir)
return
-
+
def get_label_fields(self):
"""
Get the present label fields in the object
@@ -159,17 +202,17 @@ def get_label_fields(self):
return present_label_fields
# Check the set fields of the annotation object
- def checkfields(self):
+ def check_fields(self):
# Check all set fields
for field in ann_field_types:
if getattr(self, field) is not None:
# Check the type of the field's elements
- self.checkfield(field)
+ self.check_field(field)
return
# Check a particular annotation field
- def checkfield(self, field):
+ def check_field(self, field):
item = getattr(self, field)
@@ -177,13 +220,13 @@ def checkfield(self, field):
raise TypeError('The '+field+' field must be one of the following types:', ann_field_types[field])
if field in int_ann_fields:
- if item.dtype not in _headers.int_dtypes:
+ if item.dtype not in _header.int_dtypes:
raise TypeError('The '+field+' field must have an integer-based dtype.')
# Field specific checks
- if field == 'recordname':
- if bool(re.search('[^-\w]', self.recordname)):
- raise ValueError('recordname must only comprise of letters, digits, hyphens, and underscores.')
+ if field == 'record_name':
+ if bool(re.search('[^-\w]', self.record_name)):
+ raise ValueError('record_name must only comprise of letters, digits, hyphens, and underscores.')
elif field == 'extension':
if bool(re.search('[^a-zA-Z]', self.extension)):
raise ValueError('extension must only comprise of letters.')
@@ -242,7 +285,7 @@ def checkfield(self, field):
if label_store:
if not hasattr(label_store[i], '__index__'):
raise TypeError('The label_store values of the '+field+' field must be integer-like')
-
+
if not isinstance(symbol[i], strtypes) or len(symbol[i]) not in [1,2,3]:
raise ValueError('The symbol values of the '+field+' field must be strings of length 1 to 3')
@@ -263,7 +306,7 @@ def checkfield(self, field):
for e in uniq_elements:
if not isinstance(e, strtypes):
raise TypeError('Subelements of the '+field+' field must be strings')
-
+
if field == 'symbol':
for e in uniq_elements:
if len(e) not in [1,2,3]:
@@ -274,7 +317,7 @@ def checkfield(self, field):
for e in uniq_elements:
if bool(re.search('[\t\n\r\f\v]', e)):
raise ValueError('Subelements of the '+field+' field must not contain tabs or newlines')
-
+
elif field == 'sample':
if len(self.sample) == 1:
sampdiffs = np.array([self.sample[0]])
@@ -288,18 +331,18 @@ def checkfield(self, field):
raise ValueError("The 'sample' field must contain monotonically increasing sample numbers")
if max(sampdiffs) > 2147483648:
raise ValueError('WFDB annotation files cannot store sample differences greater than 2**31')
-
+
elif field == 'label_store':
if min(item) < 1 or max(item) > 49:
raise ValueError('The label_store values must be between 1 and 49')
- # The C WFDB library stores num/sub/chan as chars.
+ # The C WFDB library stores num/sub/chan as chars.
elif field == 'subtype':
# signed character
if min(self.subtype) < 0 or max(self.subtype) >127:
raise ValueError("The 'subtype' field must only contain non-negative integers up to 127")
elif field == 'chan':
- # unsigned character
+ # unsigned character
if min(self.chan) < 0 or max(self.chan) >255:
raise ValueError("The 'chan' field must only contain non-negative integers up to 255")
elif field == 'num':
@@ -324,7 +367,7 @@ def check_field_cohesion(self, present_label_fields):
raise ValueError("The lengths of the 'sample' and '"+field+"' fields do not match")
# Ensure all label fields are defined by the label map. This has to be checked because
- # it is possible the user defined (or lack of) custom_labels does not capture all the
+ # it is possible the user defined (or lack of) custom_labels does not capture all the
# labels present.
for field in present_label_fields:
defined_values = self.__label_map__[field].values
@@ -355,11 +398,11 @@ def standardize_custom_labels(self):
label map.
2. The unused label store values. This is extracted by finding the
set of all labels contained in this annotation object and seeing
- which symbols/descriptions are not used.
+ which symbols/descriptions are not used.
If there are more custom labels defined than there are enough spaces,
even in condition 2 from above, this function will raise an error.
-
+
This function must work when called as a standalone.
"""
custom_labels = self.custom_labels
@@ -367,7 +410,7 @@ def standardize_custom_labels(self):
if custom_labels is None:
return
- self.checkfield('custom_labels')
+ self.check_field('custom_labels')
# Convert to dataframe if not already
if not isinstance(custom_labels, pd.DataFrame):
@@ -384,7 +427,7 @@ def standardize_custom_labels(self):
# Assign label_store values to the custom labels if not defined
if 'label_store' not in list(custom_labels):
undefined_label_stores = self.get_undefined_label_stores()
-
+
if len(custom_labels) > len(undefined_label_stores):
available_label_stores = self.get_available_label_stores()
else:
@@ -403,7 +446,7 @@ def standardize_custom_labels(self):
self.custom_labels = custom_labels
return
-
+
def get_undefined_label_stores(self):
"""
Get the label_store values not defined in the
@@ -412,7 +455,7 @@ def get_undefined_label_stores(self):
return list(set(range(50)) - set(ann_label_table['label_store']))
- def get_available_label_stores(self, usefield = 'tryall'):
+ def get_available_label_stores(self, usefield='tryall'):
"""
Get the label store values that may be used
for writing this annotation.
@@ -423,7 +466,7 @@ def get_available_label_stores(self, usefield = 'tryall'):
annotation object.
- the store values whose standard wfdb symbols/descriptions
match those of the custom labels (if custom_labels exists)
-
+
If 'usefield' is explicitly specified, the function will use that
field to figure out available label stores. If 'usefield'
is set to 'tryall', the function will choose one of the contained
@@ -431,7 +474,7 @@ def get_available_label_stores(self, usefield = 'tryall'):
"""
- # Figure out which field to use to get available labels stores.
+ # Figure out which field to use to get available labels stores.
if usefield == 'tryall':
if self.label_store is not None:
usefield = 'label_store'
@@ -458,7 +501,7 @@ def get_available_label_stores(self, usefield = 'tryall'):
unused_field = set(ann_label_table[usefield].values) - contained_field
unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, 'label_store'].values
- # Get the standard wfdb label_store values overwritten by the
+ # Get the standard wfdb label_store values overwritten by the
# custom_labels if any
if self.custom_symbols is not None:
custom_field = set(self.get_custom_label_attribute(usefield))
@@ -475,7 +518,7 @@ def get_available_label_stores(self, usefield = 'tryall'):
undefined_label_stores = self.get_undefined_label_stores()
# Final available label stores = undefined + unused + overwritten
available_label_stores = set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores)
-
+
return available_label_stores
@@ -493,12 +536,12 @@ def get_custom_label_attribute(self, attribute):
if isinstance(self.custom_labels, pd.DataFrame):
if 'label_store' not in list(self.custom_labels):
- raise ValueError('label_store not defined in custom_labels')
+ raise ValueError('label_store not defined in custom_labels')
a = list(self.custom_labels[attribute].values)
else:
if len(self.custom_labels[0]) == 2:
if attribute == 'label_store':
- raise ValueError('label_store not defined in custom_labels')
+ raise ValueError('label_store not defined in custom_labels')
elif attribute == 'symbol':
a = [l[0] for l in self.custom_labels]
elif attribute == 'description':
@@ -528,21 +571,21 @@ def create_label_map(self, inplace=True):
self.standardize_custom_labels()
for i in self.custom_labels.index:
label_map.loc[i] = self.custom_labels.loc[i]
-
+
if inplace:
self.__label_map__ = label_map
else:
return label_map
- def wrannfile(self, writefs):
+ def wr_ann_file(self, write_fs, write_dir=''):
"""
Calculate the bytes used to encode an annotation set and
write them to an annotation file
"""
# Calculate the fs bytes to write if present and desired to write
- if writefs:
+ if write_fs:
fs_bytes = self.calc_fs_bytes()
else:
fs_bytes = []
@@ -558,10 +601,12 @@ def wrannfile(self, writefs):
end_special_bytes = [0, 236, 255, 255, 255, 255, 1, 0]
# Write the file
- with open(self.recordname+'.'+self.extension, 'wb') as f:
+ with open(os.path.join(write_dir, self.record_name+'.'+self.extension),
+ 'wb') as f:
# Combine all bytes to write: fs (if any), custom annotations (if any), main content, file terminator
- np.concatenate((fs_bytes, cl_bytes, end_special_bytes, core_bytes, np.array([0,0]))).astype('u1').tofile(f)
-
+ np.concatenate((fs_bytes, cl_bytes, end_special_bytes, core_bytes,
+ np.array([0,0]))).astype('u1').tofile(f)
+
return
# Calculate the bytes written to the annotation file for the fs field
@@ -571,7 +616,8 @@ def calc_fs_bytes(self):
return []
# Initial indicators of encoding fs
- data_bytes = [0,88, 0, 252,35,35,32,116,105,109,101,32,114,101,115,111,108,117,116,105,111,110,58,32]
+ data_bytes = [0, 88, 0, 252, 35, 35, 32, 116, 105, 109, 101, 32, 114,
+ 101, 115, 111, 108, 117, 116, 105, 111, 110, 58, 32]
# Check if fs is close enough to int
if isinstance(self.fs, float):
@@ -593,7 +639,8 @@ def calc_fs_bytes(self):
return np.array(data_bytes).astype('u1')
- # Calculate the bytes written to the annotation file for the custom_labels field
+ # Calculate the bytes written to the annotation file for the
+ # custom_labels field
def calc_cl_bytes(self):
if self.custom_labels is None:
@@ -692,7 +739,7 @@ def compact_fields(self):
self.subtype = list(self.subtype)
for i in zero_inds:
self.subtype[i] = None
-
+
# Empty aux_note strings are not written
if self.aux_note is not None:
for i in range(nannots):
@@ -701,10 +748,10 @@ def compact_fields(self):
if np.array_equal(self.aux_note, [None]*nannots):
self.aux_note = None
-
+
def sym_to_aux(self):
- # Move non-encoded symbol elements into the aux_note field
- self.checkfield('symbol')
+ # Move non-encoded symbol elements into the aux_note field
+ self.check_field('symbol')
# Non-encoded symbols
label_table_map = self.create_label_map(inplace=False)
@@ -747,7 +794,7 @@ def get_contained_labels(self, inplace=True):
to others except rdann.
"""
if self.custom_labels is not None:
- self.checkfield('custom_labels')
+ self.check_field('custom_labels')
# Create the label map
label_map = ann_label_table.copy()
@@ -757,7 +804,8 @@ def get_contained_labels(self, inplace=True):
custom_labels = label_triplets_to_df(self.custom_labels)
elif isinstance(self.custom_labels, pd.DataFrame):
# Set the index just in case it doesn't already match the label_store
- self.custom_labels.set_index(self.custom_labels['label_store'].values, inplace=True)
+ self.custom_labels.set_index(
+ self.custom_labels['label_store'].values, inplace=True)
custom_labels = self.custom_labels
else:
custom_labels = None
@@ -788,7 +836,8 @@ def get_contained_labels(self, inplace=True):
contained_labels = label_map.loc[index_vals, :]
if reset_index:
- contained_labels.set_index(contained_labels['label_store'].values, inplace=True)
+ contained_labels.set_index(contained_labels['label_store'].values,
+ inplace=True)
if inplace:
self.contained_labels = contained_labels
@@ -815,7 +864,8 @@ def set_label_elements(self, wanted_label_elements):
for e in missing_elements:
self.convert_label_attribute(contained_elements[0], e)
- unwanted_label_elements = list(set(ann_label_fields) - set(wanted_label_elements))
+ unwanted_label_elements = list(set(ann_label_fields)
+ - set(wanted_label_elements))
self.rm_attributes(unwanted_label_elements)
@@ -828,7 +878,8 @@ def rm_attributes(self, attributes):
setattr(self, a, None)
return
- def convert_label_attribute(self, source_field, target_field, inplace=True, overwrite=True):
+ def convert_label_attribute(self, source_field, target_field, inplace=True,
+ overwrite=True):
"""
Convert one label attribute (label_store, symbol, or description) to another.
Input arguments:
@@ -836,7 +887,7 @@ def convert_label_attribute(self, source_field, target_field, inplace=True, over
- overwrite - if True, performs conversion and replaces target field attribute even if the
target attribute already has a value. If False, does not perform conversion in the aforementioned case.
Set to True (do conversion) if inplace=False.
-
+
Creates mapping df on the fly based on ann_label_table and self.custom_labels
"""
if inplace and not overwrite:
@@ -862,12 +913,13 @@ def label_triplets_to_df(triplets):
"""
Get a pd dataframe from a tuple triplets
used to define annotation labels.
-
+
The triplets should come in the
form: (label_store, symbol, description)
"""
- label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets], dtype='int'),
+ label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets],
+ dtype='int'),
'symbol':[t[1] for t in triplets],
'description':[t[2] for t in triplets]})
@@ -885,7 +937,7 @@ def custom_triplet_bytes(custom_triplet):
# Structure: 0, NOTE, len(aux_note), aux_note, codenumber, space, codesymbol, space, description, (0 null if necessary)
# Remember, aux_note string includes 'number(s)''
annbytes = [0, 88, len(custom_triplet[2]) + 3 + len(str(custom_triplet[0])), 252] + [ord(c) for c in str(custom_triplet[0])] \
- + [32] + [ord(custom_triplet[1])] + [32] + [ord(c) for c in custom_triplet[2]]
+ + [32] + [ord(custom_triplet[1])] + [32] + [ord(c) for c in custom_triplet[2]]
if len(annbytes) % 2:
annbytes.append(0)
@@ -981,80 +1033,110 @@ def field2bytes(field, value):
# - First byte stores length of aux_note field
# - Second byte stores 63*4 indicator
# - Then store the aux_note string characters
- data_bytes = [len(value), 252] + [ord(i) for i in value]
+ data_bytes = [len(value), 252] + [ord(i) for i in value]
# Zero pad odd length aux_note strings
- if len(value) % 2:
+ if len(value) % 2:
data_bytes.append(0)
return data_bytes
-# Function for writing annotations
-def wrann(recordname, extension, sample, symbol=None, subtype=None, chan=None, num=None,
- aux_note=None, label_store=None, fs=None, custom_labels=None):
+def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
+ num=None, aux_note=None, label_store=None, fs=None,
+ custom_labels=None, write_dir=''):
"""
Write a WFDB annotation file.
Specify at least the following:
- - The record name of the WFDB record (recordname)
+ - The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- - The annotation locations in samples relative to the beginning of the record (sample)
- - Either the numerical values used to store the labels (label_store), or more commonly,
- the display symbols of each label (symbol).
-
-
- Usage:
- wrann(recordname, extension, sample, symbol=None, subtype=None, chan=None, num=None,
- aux_note=None, label_store=None, fs=None, custom_labels=None):
-
- Input arguments:
- - recordname (required): The string name of the WFDB record to be written (without any file extensions).
- - extension (required): The string annotation file extension.
- - sample (required): The annotation location in samples relative to the beginning of the record. Numpy array.
- - symbol (default=None): The symbols used to display the annotation labels. List or numpy array. If this field
- is present, 'label_store' must not be present.
- - subtype (default=None): The marked class/category of each annotation. Numpy array.
- - chan (default=None): The signal channel associated with each annotation. Numpy array.
- - num (default=None): The labelled annotation number of each annotation. Numpy array.
- - aux_note (default=None): The auxiliary information strings. List or numpy array.
- - label_store (default=None): The integer values used to store the annotation labels. Numpy array.
- If this field is present, 'symbol' must not be present.
- - fs (default=None): The numerical sampling frequency of the record to be written to the file.
- - custom_labels (default=None): The map of custom defined annotation labels used for this annotation, in
- addition to the standard WFDB annotation labels. The custom labels are defined by two or three fields:
- - The integer values used to store custom annotation labels in the file (optional)
- - Their short display symbols
- - Their long descriptions.
- This input argument may come in four formats:
- 1. A pandas.DataFrame object with columns: ['label_store', 'symbol', 'description']
- 2. A pandas.DataFrame object with columns: ['symbol', 'description']
- If this option is chosen, label_store values are automatically chosen.
- 3. A list or tuple of tuple triplets, with triplet elements representing: (label_store, symbol, description).
- 4. A list or tuple of tuple pairs, with pair elements representing: (symbol, description).
- If this option is chosen, label_store values are automatically chosen.
- If the 'label_store' field is given for this function, and 'custom_labels' is defined, 'custom_labels'
- must contain 'label_store' in its mapping. ie. it must come in format 1 or 3 above.
-
- Note: This gateway function was written to enable a simple way to write WFDB annotation files without
- needing to explicity create an Annotation object beforehand.
-
- You may also create an Annotation object, manually set its attributes, and call its wrann() instance method.
-
- Note: Each annotation stored in a WFDB annotation file contains a sample field and a label field. All other fields
- may or may not be present.
-
- Example Usage:
- import wfdb
- # Read an annotation as an Annotation object
- annotation = wfdb.rdann('b001', 'atr', pbdir='cebsdb')
- # Call the gateway wrann function, manually inserting fields as function input parameters
- wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
+ - The annotation locations in samples relative to the beginning of
+ the record (sample)
+ - Either the numerical values used to store the labels
+ (`label_store`), or more commonly, the display symbols of each
+ label (`symbol`).
+
+ Parameters
+ ----------
+ record_name : str
+ The string name of the WFDB record to be written (without any file
+ extensions).
+ extension : str
+ The string annotation file extension.
+ sample : numpy array
+ A numpy array containing the annotation locations in samples relative to
+ the beginning of the record.
+ symbol : list, or numpy array, optional
+ The symbols used to display the annotation labels. List or numpy array.
+ If this field is present, `label_store` must not be present.
+ subtype : numpy array, optional
+ A numpy array containing the marked class/category of each annotation.
+ chan : numpy array, optional
+ A numpy array containing the signal channel associated with each
+ annotation.
+ num : numpy array, optional
+ A numpy array containing the labelled annotation number for each
+ annotation.
+ aux_note : list, optional
+ A list containing the auxiliary information string (or None for
+ annotations without notes) for each annotation.
+ label_store : numpy array, optional
+ A numpy array containing the integer values used to store the
+ annotation labels. If this field is present, `symbol` must not be
+ present.
+ fs : int, or float, optional
+ The numerical sampling frequency of the record to be written to the file.
+ custom_labels : pandas dataframe, optional
+ The map of custom defined annotation labels used for this annotation, in
+ addition to the standard WFDB annotation labels. Custom labels are
+ defined by two or three fields:
+ - The integer values used to store custom annotation labels in the file
+ (optional)
+ - Their short display symbols
+ - Their long descriptions.
+
+ This input argument may come in four formats:
+ 1. A pandas.DataFrame object with columns:
+ ['label_store', 'symbol', 'description']
+ 2. A pandas.DataFrame object with columns: ['symbol', 'description']
+ If this option is chosen, label_store values are automatically chosen.
+ 3. A list or tuple of tuple triplets, with triplet elements
+ representing: (label_store, symbol, description).
+ 4. A list or tuple of tuple pairs, with pair elements representing:
+ (symbol, description). If this option is chosen, label_store values
+ are automatically chosen.
+
+ If the `label_store` field is given for this function, and
+ `custom_labels` is defined, `custom_labels` must contain `label_store`
+ in its mapping. ie. it must come in format 1 or 3 above.
+ write_dir : str, optional
+        The directory in which to write the annotation file.
+
+ Notes
+ -----
+ This is a gateway function, written as a simple way to write WFDB annotation
+    files without needing to explicitly create an Annotation object. You may also
+ create an Annotation object, manually set its attributes, and call its
+ `wrann` instance method.
+
+ Each annotation stored in a WFDB annotation file contains a sample field and
+ a label field. All other fields may or may not be present.
+
+ Examples
+ --------
+ >>> # Read an annotation as an Annotation object
+ >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
+ >>> # Write a copy of the annotation file
+ >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
+
"""
# Create Annotation object
- annotation = Annotation(recordname=recordname, extension=extension, sample=sample, symbol=symbol,
- subtype=subtype, chan=chan, num=num, aux_note=aux_note, label_store=label_store,
- fs=fs, custom_labels=custom_labels)
+ annotation = Annotation(record_name=record_name, extension=extension,
+ sample=sample, symbol=symbol, subtype=subtype,
+ chan=chan, num=num, aux_note=aux_note,
+ label_store=label_store, fs=fs,
+ custom_labels=custom_labels)
# Find out which input field describes the labels
if symbol is None:
@@ -1067,15 +1149,17 @@ def wrann(recordname, extension, sample, symbol=None, subtype=None, chan=None, n
raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels")
# Perform field checks and write the annotation file
- annotation.wrann(writefs = True)
+ annotation.wrann(write_fs=True, write_dir=write_dir)
def show_ann_labels():
"""
- Display the standard wfdb annotation label mapping
-
- Usage:
- show_ann_labels()
+ Display the standard wfdb annotation label mapping.
+
+ Examples
+ --------
+ >>> show_ann_labels()
+
"""
print(ann_label_table)
@@ -1084,97 +1168,128 @@ def show_ann_classes():
"""
Display the standard wfdb annotation classes
- Usage:
- show_ann_classes()
+ Examples
+ --------
+ >>> show_ann_classes()
+
"""
print(ann_class_table)
-## ------------- Reading Annotations ------------- ##
# todo: return as df option?
-def rdann(recordname, extension, sampfrom=0, sampto=None, shiftsamps=False,
- pbdir=None, return_label_elements=['symbol'], summarize_labels=False):
- """ Read a WFDB annotation file recordname.extension and return an
+def rdann(record_name, extension, sampfrom=0, sampto=None, shift_samps=False,
+ pb_dir=None, return_label_elements=['symbol'],
+ summarize_labels=False):
+ """
+ Read a WFDB annotation file record_name.extension and return an
Annotation object.
- Usage:
- annotation = rdann(recordname, extension, sampfrom=0, sampto=None, shiftsamps=False,
- pbdir=None, return_label_elements=['symbol'], summarize_labels=False)
-
- Input arguments:
- - recordname (required): The record name of the WFDB annotation file. ie. for
- file '100.atr', recordname='100'
- - extension (required): The annotatator extension of the annotation file. ie. for
- file '100.atr', extension='atr'
- - sampfrom (default=0): The minimum sample number for annotations to be returned.
- - sampto (default=None): The maximum sample number for annotations to be returned.
- - shiftsamps (default=False): Boolean flag that specifies whether to return the
- sample indices relative to 'sampfrom' (True), or sample 0 (False). Annotation files
- store exact sample locations.
- - pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
- directory from which to find the required annotation file.
- eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- - return_label_elements (default=['symbol']): The label elements that are to be returned
- from reading the annotation file. A list with at least one of the following: 'symbol',
- 'label_store', 'description'.
- - summarize_labels (default=False): Assign a summary table of the set of annotation labels
- contained in the file to the 'contained_labels' attribute of the returned object.
- This table will contain the columns: ['label_store', 'symbol', 'description', 'n_occurences']
-
- Output argument:
- - annotation: The Annotation object. Call help(wfdb.Annotation) for the attribute
- descriptions.
-
- Note: For every annotation sample, the annotation file explictly stores the 'sample'
- and 'symbol' fields but not necessarily the others. When reading annotation files
- using this function, fields which are not stored in the file will either take their
- default values of 0 or None, or will be carried over from their previous values if any.
-
- Example usage:
- import wfdb
- ann = wfdb.rdann('sampledata/100', 'atr', sampto = 300000)
+ Parameters
+ ----------
+ record_name : str
+ The record name of the WFDB annotation file. ie. for file '100.atr',
+ record_name='100'.
+ extension : str
+        The annotator extension of the annotation file. ie. for file
+ '100.atr', extension='atr'.
+ sampfrom : int, optional
+ The minimum sample number for annotations to be returned.
+ sampto : int, optional
+ The maximum sample number for annotations to be returned.
+ shift_samps : bool, optional
+ Specifies whether to return the sample indices relative to `sampfrom`
+ (True), or sample 0 (False).
+ pb_dir : str, optional
+ Option used to stream data from Physiobank. The Physiobank database
+ directory from which to find the required annotation file. eg. For
+ record '100' in 'http://physionet.org/physiobank/database/mitdb':
+ pb_dir='mitdb'.
+ return_label_elements : list, optional
+ The label elements that are to be returned from reading the annotation
+ file. A list with at least one of the following options: 'symbol',
+ 'label_store', 'description'.
+ summarize_labels : bool, optional
+ If True, assign a summary table of the set of annotation labels
+ contained in the file to the 'contained_labels' attribute of the
+ returned object. This table will contain the columns:
+ ['label_store', 'symbol', 'description', 'n_occurences']
+
+ Returns
+ -------
+ annotation : Annotation
+ The Annotation object. Call help(wfdb.Annotation) for the attribute
+ descriptions.
+
+ Notes
+ -----
+    For every annotation sample, the annotation file explicitly stores the
+ 'sample' and 'symbol' fields, but not necessarily the others. When reading
+ annotation files using this function, fields which are not stored in the
+ file will either take their default values of 0 or None, or will be carried
+ over from their previous values if any.
+
+ Examples
+ --------
+ >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=300000)
+
"""
- return_label_elements = check_read_inputs(sampfrom, sampto, return_label_elements)
+ return_label_elements = check_read_inputs(sampfrom, sampto,
+ return_label_elements)
# Read the file in byte pairs
- filebytes = load_byte_pairs(recordname, extension, pbdir)
+ filebytes = load_byte_pairs(record_name, extension, pb_dir)
# Get wfdb annotation fields from the file bytes
- sample, label_store, subtype, chan, num, aux_note = proc_ann_bytes(filebytes, sampto)
+ (sample, label_store, subtype,
+ chan, num, aux_note) = proc_ann_bytes(filebytes, sampto)
# Get the indices of annotations that hold definition information about
# the entire annotation file, and other empty annotations to be removed.
- potential_definition_inds, rm_inds = get_special_inds(sample, label_store, aux_note)
+ potential_definition_inds, rm_inds = get_special_inds(sample, label_store,
+ aux_note)
# Try to extract information describing the annotation file
- fs, custom_labels = interpret_defintion_annotations(potential_definition_inds, aux_note)
+ (fs,
+ custom_labels) = interpret_defintion_annotations(potential_definition_inds,
+ aux_note)
# Remove annotations that do not store actual sample and label information
- sample, label_store, subtype, chan, num, aux_note = rm_empty_indices(rm_inds, sample, label_store, subtype, chan, num, aux_note)
+ (sample, label_store, subtype,
+ chan, num, aux_note) = rm_empty_indices(rm_inds, sample, label_store,
+ subtype, chan, num, aux_note)
- # Convert lists to numpy arrays
- sample, label_store, subtype, chan, num= lists_to_arrays(sample, label_store, subtype, chan, num)
+ # Convert lists to numpy arrays dtype='int'
+ (sample, label_store, subtype,
+ chan, num) = lists_to_int_arrays(sample, label_store, subtype, chan, num)
- # Obtain annotation sample relative to the starting signal index
- if shiftsamps and len(sample) > 0 and sampfrom:
- sample = sample - sampfrom
-
- # Try to get fs from the header file if it is not contained in the annotation file
+ # Try to get fs from the header file if it is not contained in the
+ # annotation file
if fs is None:
try:
- rec = records.rdheader(recordname, pbdir)
+ rec = record.rdheader(record_name, pb_dir)
fs = rec.fs
except:
pass
# Create the annotation object
- annotation = Annotation(os.path.split(recordname)[1], extension, sample=sample, label_store=label_store,
- subtype=subtype, chan=chan, num=num, aux_note=aux_note, fs=fs,
+ annotation = Annotation(record_name=os.path.split(record_name)[1],
+ extension=extension, sample=sample,
+ label_store=label_store, subtype=subtype,
+ chan=chan, num=num, aux_note=aux_note, fs=fs,
custom_labels=custom_labels)
+ # Apply the desired index range
+ if sampfrom > 0 and sampto is not None:
+ annotation.apply_range(sampfrom=sampfrom, sampto=sampto)
+
+ # If specified, obtain annotation samples relative to the starting
+ # index
+ if shift_samps and len(sample) > 0 and sampfrom:
+ annotation.sample = annotation.sample - sampfrom
- # Get the set of unique label definitions contained in this annotation
+ # Get the set of unique label definitions contained in this
+ # annotation
if summarize_labels:
annotation.get_contained_labels(inplace=True)
@@ -1200,14 +1315,14 @@ def check_read_inputs(sampfrom, sampto, return_label_elements):
return return_label_elements
# Load the annotation file 1 byte at a time and arrange in pairs
-def load_byte_pairs(recordname, extension, pbdir):
+def load_byte_pairs(record_name, extension, pb_dir):
# local file
- if pbdir is None:
- with open(recordname + '.' + extension, 'rb') as f:
+ if pb_dir is None:
+ with open(record_name + '.' + extension, 'rb') as f:
filebytes = np.fromfile(f, '> 2
while (current_label_store > 59):
- subtype, chan, num, aux_note, update, bpi = proc_extra_field(current_label_store, filebytes,
+ subtype, chan, num, aux_note, update, bpi = proc_extra_field(current_label_store, filebytes,
bpi, subtype, chan, num,
aux_note, update)
@@ -1263,12 +1378,12 @@ def proc_ann_bytes(filebytes, sampto):
# Get the sample difference and store fields of the current annotation
def proc_core_fields(filebytes, bpi):
-
+
label_store = filebytes[bpi, 1] >> 2
# The current byte pair will contain either the actual d_sample + annotation store value,
# or 0 + SKIP.
-
+
# Not a skip - it is the actual sample number + annotation type store value
if label_store != 59:
sample_diff = filebytes[bpi, 0] + 256 * (filebytes[bpi, 1] & 3)
@@ -1309,7 +1424,7 @@ def proc_extra_field(label_store, filebytes, bpi, subtype, chan, num, aux_note,
bpi = bpi + 1
# CHAN
elif label_store == 62:
- # chan is interpreted as unsigned char
+        # chan is interpreted as unsigned char
chan.append(filebytes[bpi, 0])
update['chan'] = False
bpi = bpi + 1
@@ -1343,7 +1458,7 @@ def update_extra_fields(subtype, chan, num, aux_note, update):
- aux_note and sub are set to default values if missing.
- chan and num copy over previous value if missing.
"""
-
+
if update['subtype']:
subtype.append(0)
@@ -1373,7 +1488,7 @@ def get_special_inds(sample, label_store, aux_note):
Get the indices of annotations that hold definition information about
the entire annotation file, and other empty annotations to be removed.
- Note: There is no need to deal with SKIP annotations (label_store=59)
+ Note: There is no need to deal with SKIP annotations (label_store=59)
which were already dealt with in proc_core_fields and hence not
included here.
"""
@@ -1396,7 +1511,7 @@ def get_special_inds(sample, label_store, aux_note):
def interpret_defintion_annotations(potential_definition_inds, aux_note):
"""
Try to extract annotation definition information from annotation notes.
- Information that may be contained:
+ Information that may be contained:
- fs - sample=0, label_state=22, aux_note='## time resolution: XXX'
- custom annotation label definitions
"""
@@ -1446,9 +1561,9 @@ def rm_empty_indices(*args):
return [[a[i] for i in keep_inds] for a in args[1:]]
-def lists_to_arrays(*args):
+def lists_to_int_arrays(*args):
"""
- Convert lists to numpy arrays
+ Convert lists to numpy int arrays
"""
return [np.array(a, dtype='int') for a in args]
@@ -1465,9 +1580,9 @@ def rm_last(*args):
## ------------- /Reading Annotations ------------- ##
# Allowed types of each Annotation object attribute.
-ann_field_types = {'recordname': (str), 'extension': (str), 'sample': (np.ndarray),
+ann_field_types = {'record_name': (str), 'extension': (str), 'sample': (np.ndarray),
'symbol': (list, np.ndarray), 'subtype': (np.ndarray), 'chan': (np.ndarray),
- 'num': (np.ndarray), 'aux_note': (list, np.ndarray), 'fs': _headers.floattypes,
+ 'num': (np.ndarray), 'aux_note': (list, np.ndarray), 'fs': _header.float_types,
'label_store': (np.ndarray), 'description':(list, np.ndarray), 'custom_labels': (pd.DataFrame, list, tuple),
'contained_labels':(pd.DataFrame, list, tuple)}
@@ -1498,7 +1613,7 @@ def __init__(self, extension, description, human_reviewed):
AnnotationClass('qrsc', 'Human reviewed qrs detections', True),
AnnotationClass('qrs', 'Machine QRS detections', False),
-
+
AnnotationClass('bph', 'Human reviewed BP beat detections', True),
AnnotationClass('bpm', 'Machine BP beat detections', False),
@@ -1512,7 +1627,7 @@ def __init__(self, extension, description, human_reviewed):
]
ann_class_table = pd.DataFrame({'extension':[ac.extension for ac in ann_classes], 'description':[ac.description for ac in ann_classes],
- 'human_reviewed':[ac.human_reviewed for ac in ann_classes]})
+ 'human_reviewed':[ac.human_reviewed for ac in ann_classes]})
ann_class_table.set_index(ann_class_table['extension'].values, inplace=True)
ann_class_table = ann_class_table[['extension', 'description', 'human_reviewed']]
@@ -1581,7 +1696,7 @@ def __str__(self):
]
-ann_label_table = pd.DataFrame({'label_store':np.array([al.label_store for al in ann_labels], dtype='int'), 'symbol':[al.symbol for al in ann_labels],
+ann_label_table = pd.DataFrame({'label_store':np.array([al.label_store for al in ann_labels], dtype='int'), 'symbol':[al.symbol for al in ann_labels],
'description':[al.description for al in ann_labels]})
ann_label_table.set_index(ann_label_table['label_store'].values, inplace=True)
ann_label_table = ann_label_table[['label_store','symbol','description']]
diff --git a/wfdb/io/download.py b/wfdb/io/download.py
new file mode 100644
index 00000000..f45c3190
--- /dev/null
+++ b/wfdb/io/download.py
@@ -0,0 +1,316 @@
+import multiprocessing
+import numpy as np
+import re
+import os
+import posixpath
+import requests
+
+
+db_index_url = 'http://physionet.org/physiobank/database/'
+
+
def stream_header(record_name, pb_dir):
    """Read a header file from physiobank.

    Fetches `<record_name>.hea` from the physiobank directory `pb_dir`
    and splits its contents into header lines and comment lines.

    Returns
    -------
    tuple
        ``(header_lines, comment_lines)`` — both lists of stripped strings.
    """
    # Full url of header location
    url = posixpath.join(db_index_url, pb_dir, record_name + '.hea')
    response = requests.get(url)

    # Raise HTTPError if invalid url
    response.raise_for_status()

    header_lines = []
    comment_lines = []

    # Decode and examine the file line by line
    for raw_line in response.content.decode('iso-8859-1').splitlines():
        stripped = str(raw_line.strip())
        if stripped.startswith('#'):
            # The whole line is a comment
            comment_lines.append(stripped)
        elif stripped:
            # Non-empty, non-comment line: it may still carry a trailing
            # comment after the header content.
            hash_pos = stripped.find('#')
            if hash_pos > 0:
                header_lines.append(stripped[:hash_pos])
                # comment on same line as header line
                comment_lines.append(stripped[hash_pos:])
            else:
                header_lines.append(stripped)

    return (header_lines, comment_lines)
+
+
# Read certain bytes from a dat file from physiobank
def stream_dat(file_name, pb_dir, fmt, bytecount, startbyte, datatypes):
    """Read a byte range of a physiobank dat file into a numpy array.

    Parameters
    ----------
    file_name : str
        Name of the dat file within the database directory.
    pb_dir : str
        Physiobank database directory containing the file.
    fmt : str
        WFDB dat format; used to look up the numpy dtype in `datatypes`.
    bytecount : int
        Number of bytes to read.
    startbyte : int
        Offset of the first byte to read.
    datatypes : dict
        Mapping of WFDB formats to numpy dtype strings.

    Returns
    -------
    sigbytes : numpy array
        The raw samples read from the requested byte range.
    """
    # Full url of dat file
    url = posixpath.join(db_index_url, pb_dir, file_name)

    # Specify the byte range. HTTP byte ranges are inclusive, hence the -1.
    endbyte = startbyte + bytecount - 1
    headers = {'Range': 'bytes=' + str(startbyte) + '-' + str(endbyte),
               'Accept-Encoding': '*/*'}

    # Get the content
    r = requests.get(url, headers=headers, stream=True)

    # Raise HTTPError if invalid url
    r.raise_for_status()

    # Convert to numpy array. np.frombuffer replaces the deprecated
    # np.fromstring for interpreting raw bytes.
    sigbytes = np.frombuffer(r.content, dtype=np.dtype(datatypes[fmt]))

    return sigbytes
+
# Read an entire annotation file from physiobank
def stream_annotation(file_name, pb_dir):
    """Read an entire physiobank annotation file.

    Returns
    -------
    annbytes : numpy array
        The annotation file content as unsigned bytes.
    """
    # Full url of annotation file
    url = posixpath.join(db_index_url, pb_dir, file_name)

    # Get the content
    r = requests.get(url)
    # Raise HTTPError if invalid url
    r.raise_for_status()

    # Convert to numpy array of unsigned bytes. np.frombuffer replaces
    # the deprecated np.fromstring. (This span was garbled in the source;
    # the '<u1' dtype and the function boundary below are reconstructed.)
    annbytes = np.frombuffer(r.content, dtype=np.dtype('<u1'))

    return annbytes


def get_dbs():
    """Get a list of all the physiobank databases.

    Returns
    -------
    dbs : list
        ``[slug, description]`` pairs parsed from the physiobank DBS
        index file.

    Examples
    --------
    >>> dbs = get_dbs()

    """
    url = posixpath.join(db_index_url, 'DBS')
    r = requests.get(url)

    # Each line is a tab-separated slug/description pair; collapse runs
    # of tabs used for alignment into a single separator.
    dbs = r.content.decode('ascii').splitlines()
    dbs = [re.sub('\t{2,}', '\t', line).split('\t') for line in dbs]

    return dbs
+
+
+# ---- Helper functions for downloading physiobank files ------- #
+
def get_record_list(db_dir, records='all'):
    """
    Get a list of records belonging to a database.

    Parameters
    ----------
    db_dir : str
        The database directory, usually the same as the database slug.
        The location to look for a RECORDS file.
    records : list, optional
        An option used when this function acts as a helper function.
        Leave as default 'all' to get all records.

    Examples
    --------
    >>> wfdb.get_record_list('mitdb')

    """
    # Full url of the physiobank database
    db_url = posixpath.join(db_index_url, db_dir)

    if records != 'all':
        # The records were specified manually by the caller
        return records

    # Look up the database's RECORDS index file
    response = requests.get(posixpath.join(db_url, 'RECORDS'))
    if response.status_code == 404:
        raise ValueError('The database '+db_url+' has no WFDB files to download')

    # One record name per line
    return response.content.decode('ascii').splitlines()
+
+
def get_annotators(db_dir, annotators):
    """
    Get and validate the annotation file extensions for a database.

    Parameters
    ----------
    db_dir : str
        The database directory, usually the same as the database slug.
    annotators : str, list, or None
        'all' to request every annotator listed in the database's
        ANNOTATORS file, a string or list of strings naming specific
        annotators, or None to request none.

    Returns
    -------
    annotators : list or None
        The validated list of annotator extensions; None if `annotators`
        was None, or if 'all' was requested and the database has no
        ANNOTATORS file.

    Raises
    ------
    ValueError
        If a specific annotator was requested but the database has no
        annotation files, or does not contain that annotator.
    """
    # Full url physiobank database
    db_url = posixpath.join(db_index_url, db_dir)

    if annotators is not None:
        # Check for an ANNOTATORS file
        r = requests.get(posixpath.join(db_url, 'ANNOTATORS'))
        if r.status_code == 404:
            if annotators == 'all':
                return
            else:
                raise ValueError('The database '+db_url+' has no annotation files to download')
        # Make sure the input annotators are present in the database
        annlist = r.content.decode('ascii').splitlines()
        annlist = [a.split('\t')[0] for a in annlist]

        # Get the annotation file types required
        if annotators == 'all':
            # All possible annotators
            annotators = annlist
        else:
            # Accept a single annotator given as a bare string.
            # isinstance (rather than a type() comparison) is the
            # idiomatic check and also accepts str subclasses.
            if isinstance(annotators, str):
                annotators = [annotators]
            # User-specified annotators. Check validity.
            for a in annotators:
                if a not in annlist:
                    raise ValueError('The database contains no annotators with extension: '+a)

    return annotators
+
+
# Make any required local directories
def make_local_dirs(dl_dir, dlinputs, keep_subdirs):
    """Create the local directories needed for a download.

    Parameters
    ----------
    dl_dir : str
        The base local download directory.
    dlinputs : iterable
        Download input tuples; element 1 of each tuple is the file's
        relative subdirectory.
    keep_subdirs : bool
        Whether the physiobank subdirectory structure is kept locally,
        in which case the subdirectories are created here too.
    """
    # Make the local download dir if it doesn't exist.
    # exist_ok=True guards against a race where another process creates
    # the directory between the isdir check and the makedirs call —
    # this function runs before a multiprocessing download pool.
    if not os.path.isdir(dl_dir):
        os.makedirs(dl_dir, exist_ok=True)
        print("Created local base download directory: ", dl_dir)
    # Create all required local subdirectories.
    # This must be done outside of dl_pb_file to
    # avoid a clash in multiprocessing.
    if keep_subdirs:
        dldirs = set([os.path.join(dl_dir, d[1]) for d in dlinputs])
        for d in dldirs:
            os.makedirs(d, exist_ok=True)
    return
+
+
def dl_pb_file(inputs):
    """Download a single file from physiobank, resuming or skipping based
    on any existing local copy.

    Parameters are packed into one tuple so the function can be mapped
    directly by a multiprocessing pool:
    basefile : the file name to download
    subdir : its subdirectory within the database
    db : the physiobank database directory
    dl_dir : the local base download directory
    keep_subdirs : whether the physiobank subdirectory structure is kept
    overwrite : whether to redownload regardless of local state
    """
    # Download a file from physiobank
    # The input args are to be unpacked for the use of multiprocessing

    basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs

    # Full url of file
    url = posixpath.join(db_index_url, db, subdir, basefile)

    # Get the request header. 'identity' disables compression —
    # presumably so content-length reflects the raw file size; confirm.
    rh = requests.head(url, headers={'Accept-Encoding': 'identity'})
    # Raise HTTPError if invalid url
    rh.raise_for_status()

    # Supposed size of the file
    onlinefilesize = int(rh.headers['content-length'])

    # Figure out where the file should be locally
    if keep_subdirs:
        dldir = os.path.join(dl_dir, subdir)
    else:
        dldir = dl_dir

    localfile = os.path.join(dldir, basefile)

    # The file exists locally.
    if os.path.isfile(localfile):
        # Redownload regardless
        if overwrite:
            dl_full_file(url, localfile)
        # Process accordingly.
        else:
            localfilesize = os.path.getsize(localfile)
            # Local file is smaller than it should be. Append it.
            if localfilesize < onlinefilesize:
                print('Detected partially downloaded file: '+localfile+' Appending file...')
                # Request only the remaining bytes via an open-ended Range
                headers = {"Range": "bytes="+str(localfilesize)+"-", 'Accept-Encoding': '*/*'}
                r = requests.get(url, headers=headers, stream=True)
                # NOTE(review): the two prints below look like leftover
                # debug output — consider removing.
                print('headers: ', headers)
                print('r content length: ', len(r.content))
                # Append ('ba') the fetched tail to the partial file
                with open(localfile, "ba") as writefile:
                    writefile.write(r.content)
                print('Done appending.')
            # Local file is larger than it should be. Redownload.
            elif localfilesize > onlinefilesize:
                dl_full_file(url, localfile)
            # If they're the same size, do nothing.

    # The file doesn't exist. Download it.
    else:
        dl_full_file(url, localfile)

    return
+
+
def dl_full_file(url, localfile):
    """Download the file at `url` to the path `localfile`, overwriting
    any existing content. Performs no size or existence checks."""
    response = requests.get(url)
    with open(localfile, "wb") as writefile:
        writefile.write(response.content)

    return
+
+
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
    """
    Download specified files from a Physiobank database.

    Parameters
    ----------
    db : str
        The Physiobank database directory to download. eg. For database:
        'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
    dl_dir : str
        The full local directory path in which to download the files.
    files : list
        A list of strings specifying the file names to download relative to the
        database base directory.
    keep_subdirs : bool, optional
        Whether to keep the relative subdirectories of downloaded files as they
        are organized in Physiobank (True), or to download all files into the
        same base directory (False).
    overwrite : bool, optional
        If True, all files will be redownloaded regardless. If False, existing
        files with the same name and relative subdirectory will be checked.
        If the local file is the same size as the online file, the download is
        skipped. If the local file is larger, it will be deleted and the file
        will be redownloaded. If the local file is smaller, the file will be
        assumed to be partially downloaded and the remaining bytes will be
        downloaded and appended.

    Examples
    --------
    >>> wfdb.dl_files('ahadb', os.getcwd(),
                      ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
                       'data/001a.dat'])

    """
    # Full url physiobank database
    db_url = posixpath.join(db_index_url, db)
    # Check if the database is valid
    r = requests.get(db_url)
    r.raise_for_status()

    # Construct the inputs to download: (basename, subdir, ...) per file
    dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files]

    # Make any required local directories
    make_local_dirs(dl_dir, dlinputs, keep_subdirs)

    print('Downloading files...')
    # Create multiple processes to download files.
    # Limit to 2 connections to avoid overloading the server.
    # The context manager terminates and reclaims the worker processes
    # even if a download raises (the pool was previously never closed).
    with multiprocessing.Pool(processes=2) as pool:
        pool.map(dl_pb_file, dlinputs)
    print('Finished downloading files')

    return
diff --git a/wfdb/io/record.py b/wfdb/io/record.py
new file mode 100644
index 00000000..ee29ea0f
--- /dev/null
+++ b/wfdb/io/record.py
@@ -0,0 +1,1474 @@
+# For wrheader(), all fields must be already filled in and cohesive with one another other. The signals field will not be used.
+# For wrsamp(), the field to use will be d_signal (which is allowed to be empty for 0 channel records).
+# set_p_features and set_d_features use characteristics of the p_signal or d_signal field to fill in other header fields.
+# These are separate from another method 'set_defaults' which the user may call to set default header fields
+# The check_field_cohesion() function will be called in wrheader which checks all the header fields.
+# The check_sig_cohesion() function will be called in wrsamp in wrdat to check the d_signal against the header fields.
+
+from calendar import monthrange
+from collections import OrderedDict
+import multiprocessing
+import numpy as np
+import os
+import posixpath
+import re
+import requests
+
+from . import _header
+from . import _signal
+from . import download
+
+
class BaseRecord(object):
    """The base WFDB class extended by the Record and MultiRecord classes.

    Stores the fields common to single- and multi-segment WFDB records,
    and implements the per-field validity checks shared by both.
    """
    def __init__(self, record_name=None, n_sig=None,
                 fs=None, counter_freq=None, base_counter=None,
                 sig_len=None, base_time=None, base_date=None,
                 comments=None, sig_name=None):
        self.record_name = record_name
        self.n_sig = n_sig
        self.fs = fs
        self.counter_freq = counter_freq
        self.base_counter = base_counter
        self.sig_len = sig_len
        self.base_time = base_time
        self.base_date = base_date
        self.comments = comments
        self.sig_name = sig_name

    def check_field(self, field, channels=None):
        """
        Check whether a single field is valid in its basic form. Does not
        check compatibility with other fields.

        Parameters
        ----------
        field : str
            The name of the field to check.
        channels : list or 'all', optional
            Only used for signal specification fields: the channels to
            check. 'all' requires every channel to be present; a list of
            0/1 flags allows individual channels to be None.

        Be aware that this function is not just called from wrheader.
        """
        # Check that the field is present
        if getattr(self, field) is None:
            raise Exception("Missing field required: "+field)

        # Check the type of the field (and of its elements if it should be a list)
        self.check_field_type(field, channels)

        # Expand to make sure all channels must have present field
        if channels == 'all':
            channels = [1]*len(getattr(self, field))

        # Individual specific field checks:
        if field == 'd_signal':
            # Check shape
            if self.d_signal.ndim != 2:
                raise TypeError("d_signal must be a 2d numpy array")
            # Check dtype
            if self.d_signal.dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
                raise TypeError('d_signal must be a 2d numpy array with dtype == int64, int32, int16, or int8.')
        elif field == 'p_signal':
            # Check shape
            if self.p_signal.ndim != 2:
                raise TypeError("p_signal must be a 2d numpy array")

        elif field == 'e_d_signal':
            for ch in range(len(channels)):
                # Check shape
                if self.e_d_signal[ch].ndim != 1:
                    raise TypeError("e_d_signal must be a list of 1d numpy arrays")
                # Check dtype. (Error message previously had the typo
                # 'e_d_d_signal'.)
                if self.e_d_signal[ch].dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
                    raise TypeError('e_d_signal must be a list of 1d numpy arrays with dtype == int64, int32, int16, or int8.')
        elif field == 'e_p_signal':
            # Check the shape of each channel's array. (Previously this
            # read .ndim on the containing list itself, which would raise
            # AttributeError instead of performing the check.)
            for ch in range(0, len(channels)):
                if self.e_p_signal[ch].ndim != 1:
                    raise TypeError("e_p_signal must be a list of 1d numpy arrays")

        #elif field == 'segments': # Nothing to check here.
        # Record specification fields
        elif field == 'record_name':
            # Allow letters, digits, hyphens, and underscores.
            accepted_string = re.match(r'[-\w]+', self.record_name)
            if not accepted_string or accepted_string.string != self.record_name:
                raise ValueError('record_name must only comprise of letters, digits, hyphens, and underscores.')
        elif field == 'n_seg':
            if self.n_seg <= 0:
                raise ValueError('n_seg must be a positive integer')
        elif field == 'n_sig':
            if self.n_sig <= 0:
                raise ValueError('n_sig must be a positive integer')
        elif field == 'fs':
            if self.fs <= 0:
                raise ValueError('fs must be a positive number')
        elif field == 'counter_freq':
            if self.counter_freq <= 0:
                raise ValueError('counter_freq must be a positive number')
        elif field == 'base_counter':
            if self.base_counter <= 0:
                raise ValueError('base_counter must be a positive number')
        elif field == 'sig_len':
            if self.sig_len < 0:
                raise ValueError('sig_len must be a non-negative integer')
        elif field == 'base_time':
            # Delegates format validation; raises on an invalid time string
            _ = parse_timestring(self.base_time)
        elif field == 'base_date':
            # Delegates format validation; raises on an invalid date string
            _ = parse_datestring(self.base_date)

        # Signal specification fields. Lists of elements to check.
        elif field in _header.sig_field_specs:

            for ch in range(0, len(channels)):
                f = getattr(self, field)[ch]

                # The channel element is allowed to be None
                if not channels[ch]:
                    if f is None:
                        continue

                if field == 'file_name':
                    # Check for file_name characters
                    accepted_string = re.match(r'[-\w]+\.?[\w]+', f)
                    if not accepted_string or accepted_string.string != f:
                        raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record_100.dat')
                    # Check that dat files are grouped together
                    if orderedsetlist(self.file_name)[0] != orderednoconseclist(self.file_name):
                        raise ValueError('file_name error: all entries for signals that share a given file must be consecutive')
                elif field == 'fmt':
                    if f not in _signal.dat_fmts:
                        raise ValueError('File formats must be valid WFDB dat formats: '+' , '.join(_signal.dat_fmts))
                elif field == 'samps_per_frame':
                    if f < 1:
                        raise ValueError('samps_per_frame values must be positive integers')
                elif field == 'skew':
                    if f < 0:
                        raise ValueError('skew values must be non-negative integers')
                elif field == 'byte_offset':
                    if f < 0:
                        raise ValueError('byte_offset values must be non-negative integers')
                elif field == 'adc_gain':
                    if f <= 0:
                        raise ValueError('adc_gain values must be positive numbers')
                elif field == 'baseline':
                    # Currently original WFDB library only has 4 bytes for baseline.
                    # Upper bound fixed from 2147483648 to 2147483647 (2^31 - 1),
                    # matching the error message and the signed 32-bit range.
                    if f < -2147483648 or f > 2147483647:
                        raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)')
                elif field == 'units':
                    if re.search(r'\s', f):
                        raise ValueError('units strings may not contain whitespaces.')
                elif field == 'adc_res':
                    if f < 0:
                        raise ValueError('adc_res values must be non-negative integers')
                # elif field == 'adc_zero': nothing to check here
                # elif field == 'init_value': nothing to check here
                # elif field == 'checksum': nothing to check here
                elif field == 'block_size':
                    if f < 0:
                        raise ValueError('block_size values must be non-negative integers')
                elif field == 'sig_name':
                    if re.search(r'\s', f):
                        raise ValueError('sig_name strings may not contain whitespaces.')
                    if len(set(self.sig_name)) != len(self.sig_name):
                        raise ValueError('sig_name strings must be unique.')

        # Segment specification fields
        elif field == 'seg_name':
            # Segment names must be alphanumerics or just a single '~'
            for f in self.seg_name:
                if f == '~':
                    continue
                accepted_string = re.match(r'[-\w]+', f)
                if not accepted_string or accepted_string.string != f:
                    raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'")
        elif field == 'seg_len':
            # For records with more than 1 segment, the first segment may be
            # the layout specification segment with a length of 0
            if len(self.seg_len) > 1:
                if self.seg_len[0] < 0:
                    raise ValueError('seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment')
                sl = self.seg_len[1:]
            else:
                sl = self.seg_len
            for f in sl:
                if f < 1:
                    raise ValueError('seg_len values must be positive integers. Only seg_len[0] may be 0 to indicate a layout segment')
        # Comment field
        elif field == 'comments':
            for f in self.comments:
                if f == '':  # Allow empty string comment lines
                    continue
                if f[0] == '#':
                    print("Note: comment strings do not need to begin with '#'. This library adds them automatically.")
                if re.search(r'[\t\n\r\f\v]', f):
                    raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')

    def check_field_type(self, field, ch=None):
        """
        Check the data type of the specified field.
        ch is used for signal specification fields.
        Some fields are lists. This must be checked, along with their elements.
        """
        item = getattr(self, field)

        # Record specification field. Nonlist.
        if field in _header.rec_field_specs:
            check_item_type(item, field, _header.rec_field_specs[field].allowed_types)

        # Signal specification field. List.
        elif field in _header.sig_field_specs:
            check_item_type(item, field, _header.sig_field_specs[field].allowed_types, ch)

        # Segment specification field. List. All elements cannot be None
        elif field in _header.seg_field_specs:
            check_item_type(item, field, _header.seg_field_specs[field].allowed_types, 'all')

        # Comments field. List. Elements cannot be None
        elif field == 'comments':
            check_item_type(item, field, (str), 'all')

        # Signals field.
        elif field in ['p_signal', 'd_signal']:
            check_item_type(item, field, (np.ndarray))

        elif field in ['e_p_signal', 'e_d_signal']:
            check_item_type(item, field, (np.ndarray), 'all')

        # Segments field. List. Elements may be None.
        elif field == 'segments':
            check_item_type(item, field, (Record), 'none')

    def check_read_inputs(self, sampfrom, sampto, channels, physical, m2s,
                          smooth_frames, return_res):
        """
        Ensure that input read parameters are valid for the record.

        Raises TypeError/ValueError on any out-of-range or wrongly-typed
        parameter; raises Exception for invalid multi-segment options.
        """
        # Data Type Check
        if not hasattr(sampfrom, '__index__'):
            raise TypeError('sampfrom must be an integer')
        if not hasattr(sampto, '__index__'):
            raise TypeError('sampto must be an integer')

        if not isinstance(channels, list):
            raise TypeError('channels must be a list of integers')

        # Duration Ranges
        if sampfrom < 0:
            raise ValueError('sampfrom must be a non-negative integer')
        if sampfrom > self.sig_len:
            raise ValueError('sampfrom must be shorter than the signal length')
        if sampto < 0:
            raise ValueError('sampto must be a non-negative integer')
        if sampto > self.sig_len:
            raise ValueError('sampto must be shorter than the signal length')
        if sampto <= sampfrom:
            raise ValueError('sampto must be greater than sampfrom')

        # Channel Ranges
        for c in channels:
            if c < 0:
                raise ValueError('Input channels must all be non-negative integers')
            if c > self.n_sig-1:
                raise ValueError('Input channels must all be lower than the total number of channels')

        if return_res not in [64, 32, 16, 8]:
            raise ValueError("return_res must be one of the following: 64, 32, 16, 8")
        if physical is True and return_res == 8:
            raise ValueError("return_res must be one of the following when physical is True: 64, 32, 16")

        # Cannot expand multiple samples/frame for multi-segment records
        if isinstance(self, MultiRecord):

            # If m2s == True, Physical must be true. There is no
            # meaningful representation of digital signals transferred
            # from individual segments.
            if m2s is True and physical is not True:
                raise Exception('If m2s is True, physical must also be True.')

            if smooth_frames is False:
                raise ValueError('This package version cannot expand all samples when reading multi-segment records. Must enable frame smoothing.')
+
# Check the item type. Vary the print message regarding whether the item can be None.
# Helper to check_field_type
# channels is a list of booleans indicating whether the field's channel must be present (1) or may be None (0)
# and is not just for signal specification fields
def check_item_type(item, field, allowed_types, channels=None):
    """Check that `item` (or each of its channel elements) has an allowed type.

    When `channels` is None, `item` is checked as a single scalar.
    Otherwise `item` must be a list; `channels` is either the string
    'all' (every element required), the string 'none' (every element may
    be None), or a list of 0/1 flags per element.

    Raises TypeError on any violation.
    """
    # Single scalar to check
    if channels is None:
        if not isinstance(item, allowed_types):
            raise TypeError("Field: '"+field+"' must be one of the following types:", allowed_types)
        return

    # Checking the list: the container itself must be a list
    if not isinstance(item, list):
        raise TypeError("Field: '"+field+"' must be a list")

    # Expand the shorthand strings into per-element flags
    if channels == 'all':
        channels = [1] * len(item)
    elif channels == 'none':
        channels = [0] * len(item)

    for ch, must_exist in enumerate(channels):
        element = item[ch]
        if must_exist:
            # The field must exist for the channel
            if not isinstance(element, allowed_types):
                raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be one of the following types:", allowed_types)
        else:
            # The field may be None for the channel
            if element is not None and not isinstance(element, allowed_types):
                raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be a 'None', or one of the following types:", allowed_types)
+
+
+
class Record(BaseRecord, _header.HeaderMixin, _signal.SignalMixin):
    """
    The class representing WFDB headers, and single segment WFDB records.

    Record objects can be created using the initializer, by reading a WFDB
    header with `rdheader`, or a WFDB record (header and associated dat files)
    with `rdrecord`.

    The attributes of the Record object give information about the record as
    specified by: https://www.physionet.org/physiotools/wag/header-5.htm

    In addition, the d_signal and p_signal attributes store the digital and
    physical signals of WFDB records with at least one channel.

    Examples
    --------
    >>> record = wfdb.Record(record_name='r1', fs=250, n_sig=2, sig_len=1000,
                 file_name=['r1.dat','r1.dat'])

    """
    def __init__(self, p_signal=None, d_signal=None,
                 e_p_signal=None, e_d_signal=None,
                 record_name=None, n_sig=None,
                 fs=None, counter_freq=None, base_counter=None,
                 sig_len=None, base_time=None, base_date=None,
                 file_name=None, fmt=None, samps_per_frame=None,
                 skew=None, byte_offset=None, adc_gain=None,
                 baseline=None, units=None, adc_res=None,
                 adc_zero=None, init_value=None, checksum=None,
                 block_size=None, sig_name=None, comments=None):

        # Note the lack of the 'n_seg' field. Single segment records cannot
        # have this field. Even n_seg = 1 makes the header a multi-segment
        # header.

        super(Record, self).__init__(record_name, n_sig,
                                     fs, counter_freq, base_counter, sig_len,
                                     base_time, base_date, comments, sig_name)

        # Signal fields: physical/digital, uniform/expanded
        self.p_signal = p_signal
        self.d_signal = d_signal
        self.e_p_signal = e_p_signal
        self.e_d_signal = e_d_signal

        # Signal specification fields
        self.file_name = file_name
        self.fmt = fmt
        self.samps_per_frame = samps_per_frame
        self.skew = skew
        self.byte_offset = byte_offset
        self.adc_gain = adc_gain
        self.baseline = baseline
        self.units = units
        self.adc_res = adc_res
        self.adc_zero = adc_zero
        self.init_value = init_value
        self.checksum = checksum
        self.block_size = block_size

    def __eq__(self, other):
        """Equal comparison operator: True when both objects have the same
        attribute names and equal values (numpy arrays compared
        element-wise)."""
        att1 = self.__dict__
        att2 = other.__dict__

        if set(att1.keys()) != set(att2.keys()):
            return False

        for k in att1.keys():

            v1 = att1[k]
            v2 = att2[k]

            if type(v1) != type(v2):
                return False

            if type(v1) == np.ndarray:
                if not np.array_equal(v1, v2):
                    return False
            else:
                if v1 != v2:
                    return False

        return True

    def wrsamp(self, expanded=False, write_dir=''):
        """
        Write a wfdb header file and any associated dat files from this
        object.

        Parameters
        ----------
        expanded : bool, optional
            Whether to write the expanded signal (e_d_signal) instead
            of the uniform signal (d_signal).
        write_dir : str, optional
            The directory in which to write the files.

        """
        # Perform field validity and cohesion checks, and write the
        # header file.
        self.wrheader(write_dir=write_dir)
        if self.n_sig > 0:
            # Perform signal validity and cohesion checks, and write the
            # associated dat files.
            self.wr_dats(expanded=expanded, write_dir=write_dir)

    def arrange_fields(self, channels, expanded=False):
        """
        Arrange/edit object fields to reflect user channel and/or signal
        range input. Accounts for the case when signals are expanded.

        Parameters
        ----------
        channels : list
            The channel indices that were kept.
        expanded : bool, optional
            Whether the record holds expanded signals (e_d_signal) rather
            than the uniform d_signal array.
        """
        # Rearrange signal specification fields
        for field in _header.sig_field_specs:
            item = getattr(self, field)
            setattr(self, field, [item[c] for c in channels])

        # Expanded signals - multiple samples per frame.
        if expanded:
            # Checksum and init_value to be updated if present,
            # unless the whole signal length was input.
            if self.sig_len != int(len(self.e_d_signal[0])/self.samps_per_frame[0]):
                # Guard on None for consistency with the non-expanded
                # branch below — only update these fields "if present",
                # as the original comment intended.
                if self.checksum is not None:
                    self.checksum = self.calc_checksum(expanded)
                if self.init_value is not None:
                    self.init_value = [s[0] for s in self.e_d_signal]

            self.n_sig = len(channels)
            self.sig_len = int(len(self.e_d_signal[0])/self.samps_per_frame[0])

        # MxN numpy array d_signal
        else:
            # Checksum and init_value to be updated if present,
            # unless the whole signal length was input.
            if self.sig_len != self.d_signal.shape[0]:

                if self.checksum is not None:
                    self.checksum = self.calc_checksum()
                if self.init_value is not None:
                    ival = list(self.d_signal[0, :])
                    self.init_value = [int(i) for i in ival]

            # Update record specification parameters.
            # Important that these get updated after the checks above,
            # which compare against the old sig_len.
            self.n_sig = len(channels)
            self.sig_len = self.d_signal.shape[0]
+
+
+class MultiRecord(BaseRecord, _header.MultiHeaderMixin):
+ """
+ The class representing multi-segment WFDB records.
+
+ MultiRecord objects can be created using the initializer, or by reading a
+ multi-segment WFDB record using 'rdrecord' with the `m2s` (multi to single)
+ input parameter set to False.
+
+ The attributes of the MultiRecord object give information about the entire
+ record as specified by: https://www.physionet.org/physiotools/wag/header-5.htm
+
+ In addition, the `segments` parameter is a list of Record objects
+ representing each individual segment, or None representing empty segments,
+ of the entire multi-segment record.
+
+ Notably, this class has no attribute representing the signals as a whole.
+ The 'multi_to_single' instance method can be called on MultiRecord objects
+ to return a single segment representation of the record as a Record object.
+ The resulting Record object will have its 'p_signal' field set.
+
+ Examples
+ --------
+ >>> record_m = wfdb.MultiRecord(record_name='rm', fs=50, n_sig=8,
+ sig_len=9999, seg_name=['rm_1', '~', rm_2'],
+ seg_len=[800, 200, 900])
+ >>> # Get a MultiRecord object
+ >>> record_s = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s=False)
+ >>> # Turn it into a
+ >>> record_s = record_s.multi_to_single()
+
+ record_s initially stores a `MultiRecord` object, and is then converted into
+ a `Record` object.
+
+ """
    def __init__(self, segments=None, layout=None,
                 record_name=None, n_sig=None, fs=None,
                 counter_freq=None, base_counter=None,
                 sig_len=None, base_time=None, base_date=None,
                 seg_name=None, seg_len=None, comments=None,
                 sig_name=None, sig_segments=None):
        """
        Initialize a MultiRecord. All parameters are optional and default
        to None.

        segments : list of Record objects (or None entries) for each
            segment of the record.
        layout : compared against the string 'Fixed' by this class's
            other methods; presumably 'Fixed' or a variable-layout
            marker — TODO confirm against the header-reading code.
        seg_name : list of segment names; '~' denotes an empty segment.
        seg_len : list of per-segment signal lengths.
        sig_segments : per-signal segment information — semantics not
            evident from this file; confirm before relying on it.
        The remaining parameters are the record specification fields
        shared with single-segment records and are passed to BaseRecord.
        """
        # Record specification fields are stored by the base class.
        super(MultiRecord, self).__init__(record_name, n_sig,
                 fs, counter_freq, base_counter, sig_len,
                 base_time, base_date, comments, sig_name)

        # Multi-segment specific fields.
        self.layout = layout
        self.segments = segments
        self.seg_name = seg_name
        self.seg_len = seg_len
        self.sig_segments = sig_segments
+
+
+ def wrsamp(self, write_dir=''):
+ """
+ Write a multi-segment header, along with headers and dat files
+ for all segments, from this object.
+ """
+ # Perform field validity and cohesion checks, and write the
+ # header file.
+ self.wrheader(write_dir=write_dir)
+ # Perform record validity and cohesion checks, and write the
+ # associated segments.
+ for seg in self.segments:
+ seg.wrsamp(write_dir=write_dir)
+
+
+ # Check the cohesion of the segments field with other fields used to write the record
+ def checksegmentcohesion(self):
+
+ # Check that n_seg is equal to the length of the segments field
+ if self.n_seg != len(self.segments):
+ raise ValueError("Length of segments must match the 'n_seg' field")
+
+ for i in range(0, n_seg):
+ s = self.segments[i]
+
+ # If segment 0 is a layout specification record, check that its file names are all == '~''
+ if i==0 and self.seg_len[0] == 0:
+ for file_name in s.file_name:
+ if file_name != '~':
+ raise ValueError("Layout specification records must have all file_names named '~'")
+
+ # Check that sampling frequencies all match the one in the master header
+ if s.fs != self.fs:
+ raise ValueError("The 'fs' in each segment must match the overall record's 'fs'")
+
+ # Check the signal length of the segment against the corresponding seg_len field
+ if s.sig_len != self.seg_len[i]:
+ raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length')
+
+ totalsig_len = totalsig_len + getattr(s, 'sig_len')
+
+ # No need to check the sum of sig_lens from each segment object against sig_len
+ # Already effectively done it when checking sum(seg_len) against sig_len
+
+
    # Determine the segments and the samples
    # within each segment that have to be read in a
    # multi-segment record. Called during rdsamp.
    def required_segments(self, sampfrom, sampto, channels):
        """
        Determine which segments, and which sample range within each,
        must be read to cover [sampfrom, sampto).

        Returns
        -------
        (readsegs, readsamps) : tuple
            readsegs is the list of segment indices to read; readsamps
            is a parallel list of [start, stop] sample pairs relative to
            each segment's own start.
        """
        # The starting segment with actual samples. For non-'Fixed'
        # layouts, segment 0 is the layout specifier and holds no samples.
        if self.layout == 'Fixed':
            startseg = 0
        else:
            startseg = 1

        # Cumulative sum of segment lengths (ignoring layout segment)
        cumsumlengths = list(np.cumsum(self.seg_len[startseg:]))
        # Get first segment: the first whose cumulative length exceeds sampfrom
        readsegs = [[sampfrom < cs for cs in cumsumlengths].index(True)]
        # Get final segment. sampto may equal the total length exactly,
        # in which case the last segment is the final one.
        if sampto == cumsumlengths[len(cumsumlengths) - 1]:
            readsegs.append(len(cumsumlengths) - 1)
        else:
            readsegs.append([sampto <= cs for cs in cumsumlengths].index(True))

        # Add 1 for variable layout records (shift past the layout segment)
        readsegs = list(np.add(readsegs,startseg))

        # Obtain the sampfrom and sampto to read for each segment
        if readsegs[1] == readsegs[0]:
            # Only one segment to read
            readsegs = [readsegs[0]]
            # The segment's first sample number relative to the entire record
            segstartsamp = sum(self.seg_len[0:readsegs[0]])
            readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]

        else:
            # More than one segment to read
            readsegs = list(range(readsegs[0], readsegs[1]+1))
            # Middle segments are read in full; the first and last are
            # then trimmed below.
            readsamps = [[0, self.seg_len[s]] for s in readsegs]

            # Starting sample for first segment.
            readsamps[0][0] = sampfrom - ([0] + cumsumlengths)[readsegs[0]-startseg]

            # End sample for last segment
            readsamps[-1][1] = sampto - ([0] + cumsumlengths)[readsegs[-1]-startseg]

        return (readsegs, readsamps)
+
    # Get the channel numbers to be read from each segment
    def required_signal(self, readsegs, channels, dirname, pb_dir):
        """
        For each segment to be read, determine which of its channels
        correspond to the requested overall channels.

        Returns
        -------
        readsigs : list
            One entry per segment in readsegs: a list of channel indices
            for that segment, or None for empty ('~') segments.
        """
        # Fixed layout. All channels are the same.
        if self.layout == 'Fixed':
            # Should we bother here with skipping empty segments?
            # They won't be read anyway.
            readsigs = [channels]*len(readsegs)
        # Variable layout: figure out channels by matching record names
        else:
            readsigs = []
            # The overall layout signal names (from the layout specifier
            # segment at index 0)
            l_sig_names = self.segments[0].sig_name
            # The wanted signals
            w_sig_names = [l_sig_names[c] for c in channels]

            # For each segment ...
            for i in range(0, len(readsegs)):
                # Skip empty segments
                if self.seg_name[readsegs[i]] == '~':
                    readsigs.append(None)
                else:
                    # Get the signal names of the current segment by
                    # reading its header, then match them to the wanted names.
                    s_sig_names = rdheader(os.path.join(dirname, self.seg_name[readsegs[i]]), pb_dir = pb_dir).sig_name
                    readsigs.append(wanted_siginds(w_sig_names, s_sig_names))

        return readsigs
+
+ # Arrange/edit object fields to reflect user channel and/or signal range input
+ def arrange_fields(self, readsegs, segranges, channels):
+
+ # Update seg_len values for relevant segments
+ for i in range(0, len(readsegs)):
+ self.seg_len[readsegs[i]] = segranges[i][1] - segranges[i][0]
+
+ # Update record specification parameters
+ self.n_sig = len(channels)
+ self.sig_len = sum([sr[1]-sr[0] for sr in segranges])
+
+ # Get rid of the segments and segment line parameters
+ # outside the desired segment range
+ if self.layout == 'Fixed':
+ self.segments = self.segments[readsegs[0]:readsegs[-1]+1]
+ self.seg_name = self.seg_name[readsegs[0]:readsegs[-1]+1]
+ self.seg_len = self.seg_len[readsegs[0]:readsegs[-1]+1]
+ else:
+ # Keep the layout specifier segment
+ self.segments = [self.segments[0]] + self.segments[readsegs[0]:readsegs[-1]+1]
+ self.seg_name = [self.seg_name[0]] + self.seg_name[readsegs[0]:readsegs[-1]+1]
+ self.seg_len = [self.seg_len[0]] + self.seg_len[readsegs[0]:readsegs[-1]+1]
+
+ # Update number of segments
+ self.n_seg = len(self.segments)
+
+
+ def multi_to_single(self, return_res=64):
+ """
+ Create a Record object from the MultiRecord object. All signal segments
+ will be combined into the new object's `p_signal` field.
+
+ Parameters
+ ----------
+ return_res : int
+ The return resolution of the `p_signal` field. Options are 64, 32,
+ and 16.
+
+ """
+
+ # The fields to transfer to the new object
+ fields = self.__dict__.copy()
+
+ # Remove multirecord fields
+ del(fields['segments'])
+ del(fields['seg_name'])
+ del(fields['seg_len'])
+ del(fields['n_seg'])
+
+ # The output physical signals
+ if return_res == 64:
+ floatdtype = 'float64'
+ elif return_res == 32:
+ floatdtype = 'float32'
+ else:
+ floatdtype = 'float16'
+
+
+ p_signal = np.zeros([self.sig_len, self.n_sig], dtype=floatdtype)
+
+ # Get the physical samples from each segment
+
+ # Start and end samples in the overall array
+ # to place the segment samples into
+ startsamps = [0] + list(np.cumsum(self.seg_len)[0:-1])
+ endsamps = list(np.cumsum(self.seg_len))
+
+ if self.layout == 'Fixed':
+ # Get the signal names and units from the first segment
+ fields['sig_name'] = self.segments[0].sig_name
+ fields['units'] = self.segments[0].units
+
+ for i in range(self.n_seg):
+ p_signal[startsamps[i]:endsamps[i],:] = self.segments[i].p_signal
+ # For variable layout, have to get channels by name
+ else:
+ # Get the signal names from the layout segment
+ fields['sig_name'] = self.segments[0].sig_name
+ fields['units'] = self.segments[0].units
+
+ for i in range(1, self.n_seg):
+ seg = self.segments[i]
+
+ # Empty segment
+ if seg is None:
+ p_signal[startsamps[i]:endsamps[i],:] = np.nan
+ # Non-empty segment
+ else:
+ # Figure out if there are any channels wanted and
+ # the output channels they are to be stored in
+ inchannels = []
+ outchannels = []
+ for s in fields['sig_name']:
+ if s in seg.sig_name:
+ inchannels.append(seg.sig_name.index(s))
+ outchannels.append(fields['sig_name'].index(s))
+
+ # Segment contains no wanted channels. Fill with nans.
+ if inchannels == []:
+ p_signal[startsamps[i]:endsamps[i],:] = np.nan
+ # Segment contains wanted channel(s). Transfer samples.
+ else:
+ # This statement is necessary in case this function is not called
+ # directly from rdsamp with m2s=True.
+ if not hasattr(seg, 'p_signal'):
+ seg.p_signal = seg.dac(return_res=return_res)
+ for ch in range(0, fields['n_sig']):
+ if ch not in outchannels:
+ p_signal[startsamps[i]:endsamps[i],ch] = np.nan
+ else:
+ p_signal[startsamps[i]:endsamps[i],ch] = seg.p_signal[:, inchannels[outchannels.index(ch)]]
+
+ # Create the single segment Record object and set attributes
+ record = Record()
+ for field in fields:
+ setattr(record, field, fields[field])
+ record.p_signal = p_signal
+
+ return record
+
+
+#------------------- Reading Records -------------------#
+
+def rdheader(record_name, pb_dir=None, rd_segments=False):
+ """
+ Read a WFDB header file and return the record descriptors as attributes
+ in a Record object.
+
+ Parameters
+ ----------
+ record_name : str
+ The name of the WFDB record to be read (without any file extensions).
+ If the argument contains any path delimiter characters, the argument
+ will be interpreted as PATH/baserecord and the header file will be
+ searched for in the local path.
+ pb_dir : str, optional
+ Option used to stream data from Physiobank. The Physiobank database
+ directory from which to find the required record files.
+ eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
+ pb_dir='mitdb'.
+
+ rd_segments : bool, optional
+ Used when reading multi-segment headers. If True, segment headers will
+ also be read (into the record object's `segments` field).
+
+ Returns
+ -------
+ record : Record or MultiRecord
+ The wfdb Record or MultiRecord object representing the contents of the
+ header read.
+
+ Examples
+ --------
+    >>> ecg_record = wfdb.rdheader('sample-data/test01_00s')
+
+ """
+
+ # Read the header file. Separate comment and non-comment lines
+ header_lines, comment_lines = _header.get_header_lines(record_name, pb_dir)
+
+ # Get fields from record line
+ d_rec = _header.read_rec_line(header_lines[0])
+
+ # Processing according to whether the header is single or multi segment
+
+ # Single segment header - Process signal specification lines
+ if d_rec['n_seg'] is None:
+ # Create a single-segment WFDB record object
+ record = Record()
+
+ # There is at least one channel
+ if len(header_lines)>1:
+ # Read the fields from the signal lines
+ d_sig = _header.read_sig_lines(header_lines[1:])
+ # Set the object's signal line fields
+ for field in _header.sig_field_specs:
+ setattr(record, field, d_sig[field])
+
+ # Set the object's record line fields
+ for field in _header.rec_field_specs:
+ if field == 'n_seg':
+ continue
+ setattr(record, field, d_rec[field])
+ # Multi segment header - Process segment specification lines
+ else:
+ # Create a multi-segment WFDB record object
+ record = MultiRecord()
+ # Read the fields from the segment lines
+ d_seg = _header.read_seg_lines(header_lines[1:])
+ # Set the object's segment line fields
+ for field in _header.seg_field_specs:
+ setattr(record, field, d_seg[field])
+ # Set the objects' record line fields
+ for field in _header.rec_field_specs:
+ setattr(record, field, d_rec[field])
+ # Determine whether the record is fixed or variable
+ if record.seg_len[0] == 0:
+ record.layout = 'Variable'
+ else:
+ record.layout = 'Fixed'
+
+ # If specified, read the segment headers
+ if rd_segments:
+ record.segments = []
+ # Get the base record name (could be empty)
+ dirname = os.path.split(record_name)[0]
+ for s in record.seg_name:
+ if s == '~':
+ record.segments.append(None)
+ else:
+ record.segments.append(rdheader(os.path.join(dirname,s), pb_dir))
+ # Fill in the sig_name attribute
+ record.sig_name = record.get_sig_name()
+ # Fill in the sig_segments attribute
+ record.sig_segments = record.get_sig_segments()
+
+ # Set the comments field
+ record.comments = []
+ for line in comment_lines:
+ record.comments.append(line.strip(' \t#'))
+
+ return record
+
+
+def rdrecord(record_name, sampfrom=0, sampto='end', channels='all',
+ physical=True, pb_dir=None, m2s=True, smooth_frames=True,
+ ignore_skew=False, return_res=64):
+ """
+ Read a WFDB record and return the signal and record descriptors as
+ attributes in a Record or MultiRecord object.
+
+ Parameters
+ ----------
+ record_name : str
+ The name of the WFDB record to be read (without any file extensions).
+ If the argument contains any path delimiter characters, the argument
+ will be interpreted as PATH/baserecord and the data files will be
+ searched for in the local path.
+ sampfrom : int, optional
+ The starting sample number to read for each channel.
+ sampto : int, or 'end', optional
+ The sample number at which to stop reading for each channel. Leave as
+ 'end' to read the entire duration.
+ channels : list, or 'all', optional
+ List of integer indices specifying the channels to be read. Leave as
+ 'all' to read all channels.
+ physical : bool, optional
+ Specifies whether to return signals in physical units in the p_signal
+ field (True), or digital units in the d_signal field (False).
+ pb_dir : str, optional
+ Option used to stream data from Physiobank. The Physiobank database
+ directory from which to find the required record files.
+ eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
+ pb_dir='mitdb'.
+ m2s : bool, optional
+ Used when reading multi-segment records. Specifies whether to directly
+ return a wfdb MultiRecord object (False), or to convert it into and
+ return a wfdb Record object (True).
+ smooth_frames : bool, optional
+ Used when reading records with signals having multiple samples per
+ frame. Specifies whether to smooth the samples in signals with more than
+ one sample per frame and return an (MxN) uniform numpy array as the
+ `d_signal` or `p_signal` field (True), or to return a list of 1d numpy
+ arrays containing every expanded sample as the `e_d_signal` or
+ `e_p_signal` field (False).
+ ignore_skew : bool, optional
+ Used when reading records with at least one skewed signal. Specifies
+ whether to apply the skew to align the signals in the output variable
+ (False), or to ignore the skew field and load in all values contained in
+ the dat files unaligned (True).
+ return_res : int, optional
+ The numpy array dtype of the returned signals. Options are: 64, 32,
+ 16, and 8, where the value represents the numpy int or float dtype.
+ Note that the value cannot be 8 when physical is True since there is no
+ float8 format.
+
+ Returns
+ -------
+ record : Record or MultiRecord
+ The wfdb Record or MultiRecord object representing the contents of the
+ record read.
+
+ Notes
+ -----
+ If a signal range or channel selection is specified when calling this
+ function, the resulting attributes of the returned object will be set to
+ reflect the section of the record that is actually read, rather than
+ necessarily the entire record. For example, if channels=[0, 1, 2] is
+ specified when reading a 12 channel record, the 'n_sig' attribute will be 3,
+ not 12.
+
+ The `rdsamp` function exists as a simple alternative to `rdrecord` for
+ the common purpose of extracting the physical signals and a few important
+ descriptor fields. `rdsamp` returns two arguments:
+
+ Examples
+ --------
+ >>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
+ channels = [1,3])
+
+ """
+
+ dirname, base_record_name = os.path.split(record_name)
+
+ # Read the header fields
+ record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)
+
+ # Set defaults for sampto and channels input variables
+ if sampto == 'end':
+ sampto = record.sig_len
+ if channels == 'all':
+ channels = list(range(record.n_sig))
+
+ # Ensure that input fields are valid for the record
+ record.check_read_inputs(sampfrom, sampto, channels, physical, m2s, smooth_frames, return_res)
+
+ # A single segment record
+ if isinstance(record, Record):
+
+ # Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
+ if smooth_frames or max([record.samps_per_frame[c] for c in channels])==1:
+ # Read signals from the associated dat files that contain wanted channels
+ record.d_signal = _signal.rd_segment(record.file_name, dirname, pb_dir, record.n_sig, record.fmt, record.sig_len,
+ record.byte_offset, record.samps_per_frame, record.skew,
+ sampfrom, sampto, channels, smooth_frames, ignore_skew)
+
+ # Arrange/edit the object fields to reflect user channel and/or signal range input
+ record.arrange_fields(channels, expanded=False)
+
+ if physical:
+ # Perform inplace dac to get physical signal
+ record.dac(expanded=False, return_res=return_res, inplace=True)
+
+ # Return each sample of the signals with multiple samples per frame
+ else:
+ record.e_d_signal = _signal.rd_segment(record.file_name, dirname, pb_dir, record.n_sig, record.fmt, record.sig_len,
+ record.byte_offset, record.samps_per_frame, record.skew,
+ sampfrom, sampto, channels, smooth_frames, ignore_skew)
+
+ # Arrange/edit the object fields to reflect user channel and/or signal range input
+ record.arrange_fields(channels, expanded=True)
+
+ if physical is True:
+ # Perform dac to get physical signal
+ record.dac(expanded=True, return_res=return_res, inplace=True)
+
+ # A multi segment record
+ else:
+ # Strategy:
+ # 1. Read the required segments and store them in
+ # Record objects.
+ # 2. Update the parameters of the objects to reflect
+ # the state of the sections read.
+ # 3. Update the parameters of the overall MultiRecord
+ # object to reflect the state of the individual segments.
+ # 4. If specified, convert the MultiRecord object
+ # into a single Record object.
+
+ # Segments field is a list of Record objects
+ # Empty segments store None.
+
+ record.segments = [None]*record.n_seg
+
+ # Variable layout
+ if record.seg_len[0] == 0:
+ record.layout = 'Variable'
+ # Read the layout specification header
+ record.segments[0] = rdheader(os.path.join(dirname, record.seg_name[0]), pb_dir=pb_dir)
+ # Fixed layout
+ else:
+ record.layout = 'Fixed'
+
+ # The segment numbers and samples within each segment to read.
+ readsegs, segranges = record.required_segments(sampfrom, sampto, channels)
+ # The signals within each segment to read
+ segsigs = record.required_signal(readsegs, channels, dirname, pb_dir)
+
+ # Read the desired samples in the relevant segments
+ for i in range(len(readsegs)):
+ segnum = readsegs[i]
+ # Empty segment or segment with no relevant channels
+ if record.seg_name[segnum] == '~' or segsigs[i] is None:
+ record.segments[segnum] = None
+ else:
+ record.segments[segnum] = rdrecord(
+ os.path.join(dirname, record.seg_name[segnum]),
+ sampfrom=segranges[i][0], sampto=segranges[i][1],
+ channels=segsigs[i], physical=True, pb_dir=pb_dir)
+
+ # Arrange the fields of the overall object to reflect user input
+ record.arrange_fields(readsegs, segranges, channels)
+
+ # Convert object into a single segment Record object
+ if m2s:
+ record = record.multi_to_single(return_res=return_res)
+
+ # Perform dtype conversion if necessary
+ if isinstance(record, Record) and record.n_sig>0:
+ record.convert_dtype(physical, return_res, smooth_frames)
+
+ return record
+
+
+def rdsamp(record_name, sampfrom=0, sampto='end', channels='all', pb_dir=None):
+ """
+ Read a WFDB record, and return the physical signals and a few important
+ descriptor fields.
+
+ Parameters
+ ----------
+ record_name : str
+ The name of the WFDB record to be read (without any file extensions).
+ If the argument contains any path delimiter characters, the argument
+ will be interpreted as PATH/baserecord and the data files will be
+ searched for in the local path.
+ sampfrom : int, optional
+ The starting sample number to read for each channel.
+ sampto : int, or 'end', optional
+ The sample number at which to stop reading for each channel. Leave as
+ 'end' to read the entire duration.
+ channels : list, or 'all', optional
+ List of integer indices specifying the channels to be read. Leave as
+ 'all' to read all channels.
+ pb_dir : str, optional
+ Option used to stream data from Physiobank. The Physiobank database
+ directory from which to find the required record files.
+ eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
+ pb_dir='mitdb'.
+
+ Returns
+ -------
+ signals : numpy array
+ A 2d numpy array storing the physical signals from the record.
+ fields : dict
+ A dictionary containing several key attributes of the read record:
+ - fs: The sampling frequency of the record
+ - units: The units for each channel
+ - sig_name: The signal name for each channel
+ - comments: Any comments written in the header
+
+ Notes
+ -----
+ If a signal range or channel selection is specified when calling this
+ function, the resulting attributes of the returned object will be set to
+ reflect the section of the record that is actually read, rather than
+ necessarily the entire record. For example, if channels=[0, 1, 2] is
+ specified when reading a 12 channel record, the 'n_sig' attribute will be 3,
+ not 12.
+
+ The `rdrecord` function is the base function upon which this one is built.
+ It returns all attributes present, along with the signals, as attributes in
+ a `Record` object. The function, along with the returned data type, has more
+ options than `rdsamp` for users who wish to more directly manipulate WFDB
+ content.
+
+ Examples
+ --------
+    >>> signals, fields = wfdb.rdsamp('sample-data/test01_00s', sampfrom=800,
+                                      channels=[1, 3])
+
+ """
+
+ record = rdrecord(record_name, sampfrom, sampto, channels, True, pb_dir, True)
+
+ signals = record.p_signal
+ fields = {}
+ for field in ['fs','units','sig_name', 'comments']:
+ fields[field] = getattr(record, field)
+
+ return signals, fields
+
+
+def wanted_siginds(wanted_sig_names, record_sig_names):
+ """
+ Given some wanted signal names, and the signal names contained
+ in a record, return the indices of the record channels that intersect.
+ Remember that the wanted signal names are already in order specified in user input channels.
+ """
+ contained_signal = [s for s in wanted_sig_names if s in record_sig_names]
+ if contained_signal == []:
+ return None
+ else:
+ return [record_sig_names.index(s) for s in contained_signal]
+
+
+
+
+#------------------- /Reading Records -------------------#
+
+
+def wrsamp(record_name, fs, units, sig_name, p_signal=None, d_signal=None,
+ fmt=None, adc_gain=None, baseline=None, comments=None, base_time=None,
+ base_date=None, write_dir=''):
+ """
+ Write a single segment WFDB record, creating a WFDB header file and any
+ associated dat files.
+
+ Parameters
+ ----------
+ record_name : str
+ The string name of the WFDB record to be written (without any file
+ extensions).
+ fs : int, or float
+ The sampling frequency of the record.
+ units : list
+ A list of strings giving the units of each signal channel.
+ sig_name :
+ A list of strings giving the signal name of each signal channel.
+ p_signal : numpy array, optional
+ An (MxN) 2d numpy array, where M is the signal length. Gives the
+ physical signal values intended to be written. Either p_signal or
+ d_signal must be set, but not both. If p_signal is set, this method will
+ use it to perform analogue-digital conversion, writing the resultant
+ digital values to the dat file(s). If fmt is set, gain and baseline must
+ be set or unset together. If fmt is unset, gain and baseline must both
+ be unset.
+ d_signal : numpy array, optional
+ An (MxN) 2d numpy array, where M is the signal length. Gives the
+ digital signal values intended to be directly written to the dat
+ file(s). The dtype must be an integer type. Either p_signal or d_signal
+ must be set, but not both. In addition, if d_signal is set, fmt, gain
+ and baseline must also all be set.
+ fmt : list, optional
+ A list of strings giving the WFDB format of each file used to store each
+        channel. Accepted formats are: '80','212','16','24', and '32'. There are
+ other WFDB formats as specified by:
+ https://www.physionet.org/physiotools/wag/signal-5.htm
+ but this library will not write (though it will read) those file types.
+ adc_gain : list, optional
+ A list of numbers specifying the ADC gain.
+ baseline : list, optional
+ A list of integers specifying the digital baseline.
+ comments : list, optional
+ A list of string comments to be written to the header file.
+ base_time : str, optional
+ A string of the record's start time in 24h 'HH:MM:SS(.ms)' format.
+ base_date : str, optional
+ A string of the record's start date in 'DD/MM/YYYY' format.
+ write_dir : str, optional
+ The directory in which to write the files.
+
+ Notes
+ -----
+ This is a gateway function, written as a simple method to write WFDB record
+ files using the most common parameters. Therefore not all WFDB fields can be
+ set via this function.
+
+ For more control over attributes, create a `Record` object, manually set its
+ attributes, and call its `wrsamp` instance method. If you choose this more
+ advanced method, see also the `set_defaults`, `set_d_features`, and
+ `set_p_features` instance methods to help populate attributes.
+
+ Examples
+ --------
+ >>> # Read part of a record from Physiobank
+ >>> signals, fields = wfdb.rdsamp('a103l', sampfrom=50000, channels=[0,1],
+ pb_dir='challenge/2015/training')
+ >>> # Write a local WFDB record (manually inserting fields)
+ >>> wfdb.wrsamp('ecgrecord', fs = 250, units=['mV', 'mV'],
+ sig_name=['I', 'II'], p_signal=signals, fmt=['16', '16'])
+
+ """
+
+ # Check input field combinations
+ if p_signal is not None and d_signal is not None:
+ raise Exception('Must only give one of the inputs: p_signal or d_signal')
+ if d_signal is not None:
+ if fmt is None or adc_gain is None or baseline is None:
+ raise Exception("When using d_signal, must also specify 'fmt', 'gain', and 'baseline' fields.")
+ # Depending on whether d_signal or p_signal was used, set other required features.
+ if p_signal is not None:
+ # Create the Record object
+ record = Record(record_name=record_name, p_signal=p_signal, fs=fs,
+ fmt=fmt, units=units, sig_name=sig_name,
+ adc_gain=adc_gain, baseline=baseline,
+ comments=comments, base_time=base_time,
+ base_date=base_date)
+ # Compute optimal fields to store the digital signal, carry out adc,
+ # and set the fields.
+ record.set_d_features(do_adc=1)
+ else:
+ # Create the Record object
+ record = Record(record_name=record_name, d_signal=d_signal, fs=fs,
+ fmt=fmt, units=units, sig_name=sig_name,
+ adc_gain=adc_gain, baseline=baseline,
+ comments=comments, base_time=base_time,
+ base_date=base_date)
+ # Use d_signal to set the fields directly
+ record.set_d_features()
+
+ # Set default values of any missing field dependencies
+ record.set_defaults()
+ # Write the record files - header and associated dat
+ record.wrsamp(write_dir=write_dir)
+
+
+# Time string parser for WFDB header - H(H):M(M):S(S(.sss)) format.
+def parse_timestring(timestring):
+    times = re.findall("(?P<hours>\d{1,2}):(?P<minutes>\d{1,2}):(?P<seconds>\d{1,2}[.\d+]*)", timestring)
+
+ if not times:
+ raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
+ else:
+ hours, minutes, seconds = times[0]
+
+ if not hours or not minutes or not seconds:
+ raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
+
+ hours = int(hours)
+ minutes = int(minutes)
+ seconds = float(seconds)
+
+ if int(hours) >23:
+ raise ValueError('hours must be < 24')
+ elif hours<0:
+ raise ValueError('hours must be positive')
+ if minutes>59:
+ raise ValueError('minutes must be < 60')
+ elif minutes<0:
+ raise ValueError('minutes must be positive')
+ if seconds>59:
+ raise ValueError('seconds must be < 60')
+ elif seconds<0:
+ raise ValueError('seconds must be positive')
+
+ return (hours, minutes, seconds)
+
+# Date string parser for WFDB header - DD/MM/YYYY
+def parse_datestring(datestring):
+    dates = re.findall(r"(?P<day>\d{2})/(?P<month>\d{2})/(?P<year>\d{4})", datestring)
+
+ if not dates:
+ raise ValueError("Invalid date string. Acceptable format is: 'DD/MM/YYYY'")
+ else:
+ day, month, year = dates[0]
+
+ day = int(day)
+ month = int(month)
+ year = int(year)
+
+ if year<1:
+ raise ValueError('year must be positive')
+ if month<1 or month>12:
+ raise ValueError('month must be between 1 and 12')
+ if day not in range(1, monthrange(year, month)[1]+1):
+ raise ValueError('day does not exist for specified year and month')
+
+ return (day, month, year)
+
+# Returns the unique elements in a list in the order that they appear.
+# Also returns the indices of the original list that correspond to each output element.
+def orderedsetlist(fulllist):
+ uniquelist = []
+ original_inds = {}
+
+ for i in range(0, len(fulllist)):
+ item = fulllist[i]
+ # new item
+ if item not in uniquelist:
+ uniquelist.append(item)
+ original_inds[item] = [i]
+ # previously seen item
+ else:
+ original_inds[item].append(i)
+ return uniquelist, original_inds
+
+# Returns elements in a list without consecutive repeated values.
+def orderednoconseclist(fulllist):
+ noconseclist = [fulllist[0]]
+ if len(fulllist) == 1:
+ return noconseclist
+ for i in fulllist:
+ if i!= noconseclist[-1]:
+ noconseclist.append(i)
+ return noconseclist
+
+
+def dl_database(db_dir, dl_dir, records='all', annotators='all', keep_subdirs=True,
+ overwrite = False):
+ """
+ Download WFDB record (and optionally annotation) files from a
+ Physiobank database. The database must contain a 'RECORDS' file in
+ its base directory which lists its WFDB records.
+
+ Parameters
+ ----------
+ db_dir : str
+ The Physiobank database directory to download. eg. For database:
+ 'http://physionet.org/physiobank/database/mitdb', db_dir='mitdb'.
+ dl_dir : str
+ The full local directory path in which to download the files.
+ records : list, or 'all', optional
+ A list of strings specifying the WFDB records to download. Leave
+ as 'all' to download all records listed in the database's
+ RECORDS file.
+        eg. records=['test01_00s', 'test02_45s'] for database:
+ https://physionet.org/physiobank/database/macecgdb/
+ annotators : list, 'all', or None, optional
+ A list of strings specifying the WFDB annotation file types to
+ download along with the record files. Is either None to skip
+ downloading any annotations, 'all' to download all annotation
+ types as specified by the ANNOTATORS file, or a list of strings
+ which each specify an annotation extension.
+ eg. annotators = ['anI'] for database:
+ https://physionet.org/physiobank/database/prcp/
+ keep_subdirs : bool, optional
+ Whether to keep the relative subdirectories of downloaded files
+ as they are organized in Physiobank (True), or to download all
+ files into the same base directory (False).
+ overwrite : bool, optional
+ If True, all files will be redownloaded regardless. If False,
+ existing files with the same name and relative subdirectory will
+ be checked. If the local file is the same size as the online
+ file, the download is skipped. If the local file is larger, it
+ will be deleted and the file will be redownloaded. If the local
+ file is smaller, the file will be assumed to be partially
+ downloaded and the remaining bytes will be downloaded and
+ appended.
+
+ Examples
+ --------
+ >>> wfdb.dl_database('ahadb', os.getcwd())
+
+ """
+    # Check if the database is valid
+    dburl = posixpath.join(download.db_index_url, db_dir)
+    r = requests.get(dburl)
+    r.raise_for_status()
+
+ # Get the list of records
+ recordlist = download.get_record_list(db_dir, records)
+ # Get the annotator extensions
+ annotators = download.get_annotators(db_dir, annotators)
+
+ # All files to download (relative to the database's home directory)
+ allfiles = []
+
+ for rec in recordlist:
+ # Check out whether each record is in MIT or EDF format
+ if rec.endswith('.edf'):
+ allfiles.append(rec)
+ else:
+ # If MIT format, have to figure out all associated files
+ allfiles.append(rec+'.hea')
+ dirname, baserecname = os.path.split(rec)
+ record = rdheader(baserecname, pb_dir=posixpath.join(db_dir, dirname))
+
+ # Single segment record
+ if isinstance(record, Record):
+ # Add all dat files of the segment
+ for file in record.file_name:
+ allfiles.append(posixpath.join(dirname, file))
+
+ # Multi segment record
+ else:
+ for seg in record.seg_name:
+ # Skip empty segments
+ if seg == '~':
+ continue
+ # Add the header
+ allfiles.append(posixpath.join(dirname, seg+'.hea'))
+ # Layout specifier has no dat files
+ if seg.endswith('_layout'):
+ continue
+ # Add all dat files of the segment
+ recseg = rdheader(seg, pb_dir=posixpath.join(db_dir, dirname))
+ for file in recseg.file_name:
+ allfiles.append(posixpath.join(dirname, file))
+ # check whether the record has any requested annotation files
+ if annotators is not None:
+ for a in annotators:
+ annfile = rec+'.'+a
+ url = posixpath.join(download.db_index_url, db_dir, annfile)
+ rh = requests.head(url)
+
+ if rh.status_code != 404:
+ allfiles.append(annfile)
+
+ dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], db_dir, dl_dir, keep_subdirs, overwrite) for file in allfiles]
+
+ # Make any required local directories
+ download.make_local_dirs(dl_dir, dlinputs, keep_subdirs)
+
+ print('Downloading files...')
+ # Create multiple processes to download files.
+ # Limit to 2 connections to avoid overloading the server
+ pool = multiprocessing.Pool(processes=2)
+ pool.map(download.dl_pb_file, dlinputs)
+ print('Finished downloading files')
+
+ return
+
+
+# ---------- For storing WFDB Signal definitions ---------- #
+
+
+# Unit scales used for default display scales.
+unit_scale = {
+ 'Voltage': ['pV', 'nV', 'uV', 'mV', 'V', 'kV'],
+ 'Temperature': ['C'],
+ 'Pressure': ['mmHg'],
+}
+
+
+
+# Signal class with all its parameters
+class SignalClass(object):
+ def __init__(self, abbreviation, description, signalnames):
+ self.abbreviation = abbreviation
+ self.description = description
+ # names that are assigned to this signal type
+ self.signalnames = signalnames
+
+ def __str__(self):
+ return self.abbreviation
+
+# All signal types. Make sure signal names are in lower case.
+sig_classes = [
+ SignalClass('BP', 'Blood Pressure', ['bp','abp','pap','cvp',]),
+ SignalClass('CO2', 'Carbon Dioxide', ['co2']),
+ SignalClass('CO', 'Carbon Monoxide', ['co']),
+ SignalClass('ECG', 'Electrocardiogram', ['i','ii','iii','iv','v','avr']),
+ SignalClass('EEG', 'Electroencephalogram',['eeg']),
+ SignalClass('EMG', 'Electromyograph', ['emg']),
+ SignalClass('EOG', 'Electrooculograph', ['eog']),
+ SignalClass('HR', 'Heart Rate', ['hr']),
+ SignalClass('MMG', 'Magnetomyograph', ['mmg']),
+    SignalClass('O2', 'Oxygen', ['o2','spo2']),
+ SignalClass('PLETH', 'Plethysmograph', ['pleth']),
+ SignalClass('RESP', 'Respiration', ['resp']),
+ SignalClass('SCG', 'Seismocardiogram', ['scg']),
+ SignalClass('STAT', 'Status', ['stat','status']), # small integers indicating status
+ SignalClass('ST', 'ECG ST Segment', ['st']),
+ SignalClass('TEMP', 'Temperature', ['temp']),
+ SignalClass('UNKNOWN', 'Unknown Class', []),
+]
diff --git a/wfdb/plot/__init__.py b/wfdb/plot/__init__.py
index e69de29b..b016477e 100644
--- a/wfdb/plot/__init__.py
+++ b/wfdb/plot/__init__.py
@@ -0,0 +1,4 @@
+"""
+The plot subpackage contains tools for plotting signals and annotations.
+"""
+from .plot import plot_items, plot_wfdb, plot_all_records
diff --git a/wfdb/plot/plot.py b/wfdb/plot/plot.py
new file mode 100644
index 00000000..5004b4d7
--- /dev/null
+++ b/wfdb/plot/plot.py
@@ -0,0 +1,488 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import os
+
+from ..io.record import Record, rdrecord
+from ..io._header import float_types
+from ..io._signal import downround, upround
+from ..io.annotation import Annotation
+
+
+def plot_items(signal=None, ann_samp=None, ann_sym=None, fs=None,
+ time_units='samples', sig_name=None, sig_units=None,
+ ylabel=None, title=None, sig_style=[''], ann_style=['r*'],
+ ecg_grids=[], figsize=None, return_fig=False):
+ """
+ Subplot individual channels of signals and/or annotations.
+
+ Parameters
+ ----------
+ signal : 1d or 2d numpy array, optional
+ The uniformly sampled signal to be plotted. If signal.ndim is 1, it is
+ assumed to be a one channel signal. If it is 2, axes 0 and 1, must
+ represent time and channel number respectively.
+ ann_samp: list, optional
+ A list of annotation locations to plot, with each list item
+ corresponding to a different channel. List items may be:
+ - 1d numpy array, with values representing sample indices
+ - list, with values representing sample indices
+ - None. For channels in which nothing is to be plotted.
+ If `signal` is defined, the annotation locations will be overlaid on
+ the signals, with the list index corresponding to the signal channel.
+ The length of `ann_samp` does not have to match the number of
+ channels of `signal`.
+ ann_sym: list, optional
+ A list of annotation symbols to plot, with each list item
+ corresponding to a different channel. List items should be lists of
+ strings. The symbols are plotted over the corresponding `ann_samp`
+ index locations.
+ fs : int or float, optional
+ The sampling frequency of the signals and/or annotations. Used to
+ calculate time intervals if `time_units` is not 'samples'. Also
+ required for plotting ecg grids.
+ time_units : str, optional
+ The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes',
+ and 'hours'.
+ sig_name : list, optional
+ A list of strings specifying the signal names. Used with `sig_units`
+ to form y labels, if `ylabel` is not set.
+ sig_units : list, optional
+ A list of strings specifying the units of each signal channel. Used
+ with `sig_name` to form y labels, if `ylabel` is not set. This
+ parameter is required for plotting ecg grids.
+ ylabel : list, optional
+ A list of strings specifying the final y labels. If this option is
+ present, `sig_name` and `sig_units` will not be used for labels.
+ title : str, optional
+ The title of the graph.
+ sig_style : list, optional
+ A list of strings, specifying the style of the matplotlib plot for each
+ signal channel. If the list has a length of 1, the style will be used
+ for all channels.
+ ann_style : list, optional
+ A list of strings, specifying the style of the matplotlib plot for each
+ annotation channel. If the list has a length of 1, the style will be
+ used for all channels.
+ ecg_grids : list, optional
+ A list of integers specifying channels in which to plot ecg grids. May
+ also be set to 'all' for all channels. Major grids at 0.5mV, and minor
+ grids at 0.125mV. All channels to be plotted with grids must have
+ `sig_units` equal to 'uV', 'mV', or 'V'.
+ figsize : tuple, optional
+ Tuple pair specifying the width, and height of the figure. It is the
+ 'figsize' argument passed into matplotlib.pyplot's `figure` function.
+ return_fig : bool, optional
+ Whether the figure is to be returned as an output argument.
+
+ Returns
+ -------
+ figure : matplotlib figure, optional
+ The matplotlib figure generated. Only returned if the 'return_fig'
+ parameter is set to True.
+
+ Examples
+ --------
+ >>> record = wfdb.rdrecord('sample-data/100', sampto=3000)
+ >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=3000)
+
+ >>> wfdb.plot_items(signal=record.p_signal,
 ann_samp=[ann.sample, ann.sample],
+ title='MIT-BIH Record 100', time_units='seconds',
+ figsize=(10,4), ecg_grids='all')
+
+ """
+
+ # Figure out number of subplots required
+ sig_len, n_sig, n_annot, n_subplots = get_plot_dims(signal, ann_samp)
+
+ # Create figure
+ fig, axes = create_figure(n_subplots, figsize)
+
+ if signal is not None:
+ plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes)
+
+ if ann_samp is not None:
+ plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs,
+ time_units, ann_style, axes)
+
+ if ecg_grids:
+ plot_ecg_grids(ecg_grids, fs, sig_units, time_units, axes)
+
+ # Add title and axis labels.
+ label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,
+ title)
+
+ plt.show(fig)
+
+ if return_fig:
+ return fig
+
+def get_plot_dims(signal, ann_samp):
+ "Figure out the number of plot channels"
+ if signal is not None:
+ if signal.ndim == 1:
+ sig_len = len(signal)
+ n_sig = 1
+ else:
+ sig_len = signal.shape[0]
+ n_sig = signal.shape[1]
+ else:
+ sig_len = 0
+ n_sig = 0
+
+ if ann_samp is not None:
+ n_annot = len(ann_samp)
+ else:
+ n_annot = 0
+
+ return sig_len, n_sig, n_annot, max(n_sig, n_annot)
+
+
+def create_figure(n_subplots, figsize):
+ "Create the plot figure and subplot axes"
+ fig = plt.figure(figsize=figsize)
+ axes = []
+
+ for i in range(n_subplots):
+ axes.append(fig.add_subplot(n_subplots, 1, i+1))
+
+ return fig, axes
+
+
+def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
+ "Plot signal channels"
+
+ # Extend signal style if necessary
+ if len(sig_style) == 1:
+ sig_style = n_sig * sig_style
+
+ # Figure out time indices
+ if time_units == 'samples':
+ t = np.linspace(0, sig_len-1, sig_len)
+ else:
+ downsample_factor = {'seconds':fs, 'minutes':fs * 60,
+ 'hours':fs * 3600}
+ t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units]
+
+ # Plot the signals
+ if signal.ndim == 1:
+ axes[0].plot(t, signal, sig_style[0], zorder=3)
+ else:
+ for ch in range(n_sig):
+ axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3)
+
+
+def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units,
+ ann_style, axes):
+ "Plot annotations, possibly overlaid on signals"
+ # Extend annotation style if necessary
+ if len(ann_style) == 1:
+ ann_style = n_annot * ann_style
+
+ # Figure out downsample factor for time indices
+ if time_units == 'samples':
+ downsample_factor = 1
+ else:
+ downsample_factor = {'seconds':float(fs), 'minutes':float(fs)*60,
+ 'hours':float(fs)*3600}[time_units]
+
+ # Plot the annotations
+ for ch in range(n_annot):
+ if ann_samp[ch] is not None:
+ # Figure out the y values to plot on a channel basis
+
+ # 1 dimensional signals
+ if n_sig > ch:
+ if signal.ndim == 1:
+ y = signal[ann_samp[ch]]
+ else:
+ y = signal[ann_samp[ch], ch]
+ else:
+ y = np.zeros(len(ann_samp[ch]))
+
+ axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch])
+
+ # Plot the annotation symbols if any
+ if ann_sym is not None and ann_sym[ch] is not None:
+ for i, s in enumerate(ann_sym[ch]):
+ axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor,
+ y[i]))
+
+
+def plot_ecg_grids(ecg_grids, fs, units, time_units, axes):
+ "Add ecg grids to the axes"
+ if ecg_grids == 'all':
+ ecg_grids = range(0, len(axes))
+
+
+ for ch in ecg_grids:
+ # Get the initial plot limits
+ auto_xlims = axes[ch].get_xlim()
+ auto_ylims= axes[ch].get_ylim()
+
+ (major_ticks_x, minor_ticks_x, major_ticks_y,
+ minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1],
+ units[ch], fs, auto_xlims[1],
+ time_units)
+
+ min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)
+ min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)
+
+ for tick in minor_ticks_x:
+ axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed',
+ marker='|', zorder=1)
+ for tick in major_ticks_x:
+ axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa',
+ marker='|', zorder=2)
+ for tick in minor_ticks_y:
+ axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed',
+ marker='_', zorder=1)
+ for tick in major_ticks_y:
+ axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa',
+ marker='_', zorder=2)
+
+ # Plotting the lines changes the graph. Set the limits back
+ axes[ch].set_xlim(auto_xlims)
+ axes[ch].set_ylim(auto_ylims)
+
+def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units):
+ """
+ Calculate tick intervals for ecg grids
+
+ - 5mm 0.2s major grids, 0.04s minor grids
+ - 0.5mV major grids, 0.125 minor grids
+
+ 10 mm is equal to 1mV in voltage.
+ """
+ # Get the grid interval of the x axis
+ if time_units == 'samples':
+ majorx = 0.2 * fs
+ minorx = 0.04 * fs
+ elif time_units == 'seconds':
+ majorx = 0.2
+ minorx = 0.04
+ elif time_units == 'minutes':
+ majorx = 0.2 / 60
+ minorx = 0.04/60
+ elif time_units == 'hours':
+ majorx = 0.2 / 3600
+ minorx = 0.04 / 3600
+
+ # Get the grid interval of the y axis
+ if sig_units.lower()=='uv':
+ majory = 500
+ minory = 125
+ elif sig_units.lower()=='mv':
+ majory = 0.5
+ minory = 0.125
+ elif sig_units.lower()=='v':
+ majory = 0.0005
+ minory = 0.000125
+ else:
+ raise ValueError('Signal units must be uV, mV, or V to plot ECG grids.')
+
+ major_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, majorx)
+ minor_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, minorx)
+
+ major_ticks_y = np.arange(downround(minsig, majory),
+ upround(maxsig, majory) + 0.0001, majory)
+ minor_ticks_y = np.arange(downround(minsig, majory),
+ upround(maxsig, majory) + 0.0001, minory)
+
+ return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y)
+
+
+def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,
+ title):
+ "Add title, and axes labels"
+ if title:
+ axes[0].set_title(title)
+
+ # Determine y label
+ # Explicit labels take precedence if present. Otherwise, construct labels
+ # using signal names and units
+ if not ylabel:
+ ylabel = []
+ # Set default channel and signal names if needed
+ if not sig_name:
+ sig_name = ['ch_'+str(i) for i in range(n_subplots)]
+ if not sig_units:
+ sig_units = n_subplots * ['NU']
+
+ ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)]
+
+ for ch in range(n_subplots):
+ axes[ch].set_ylabel(ylabel[ch])
+
+ axes[-1].set_xlabel('/'.join(['time', time_units[:-1]]))
+
+
+def plot_wfdb(record=None, annotation=None, plot_sym=False,
+ time_units='samples', title=None, sig_style=[''],
+ ann_style=['r*'], ecg_grids=[], figsize=None, return_fig=False):
+ """
+ Subplot individual channels of a wfdb record and/or annotation.
+
+ This function implements the base functionality of the `plot_items`
+ function, while allowing direct input of wfdb objects.
+
+ If the record object is input, the function will extract from it:
+ - signal values, from the `p_signal` (priority) or `d_signal` attribute
+ - sampling frequency, from the `fs` attribute
+ - signal names, from the `sig_name` attribute
+ - signal units, from the `units` attribute
+
+ If the annotation object is input, the function will extract from it:
+ - sample locations, from the `sample` attribute
+ - symbols, from the `symbol` attribute
+ - the annotation channels, from the `chan` attribute
+ - the sampling frequency, from the `fs` attribute if present, and if fs
+ was not already extracted from the `record` argument.
+
+
+ Parameters
+ ----------
+ record : wfdb Record, optional
+ The Record object to be plotted
+ annotation : wfdb Annotation, optional
+ The Annotation object to be plotted
+ plot_sym : bool, optional
+ Whether to plot the annotation symbols on the graph.
+ time_units : str, optional
+ The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes',
+ and 'hours'.
+ title : str, optional
+ The title of the graph.
+ sig_style : list, optional
+ A list of strings, specifying the style of the matplotlib plot for each
+ signal channel. If the list has a length of 1, the style will be used
+ for all channels.
+ ann_style : list, optional
+ A list of strings, specifying the style of the matplotlib plot for each
+ annotation channel. If the list has a length of 1, the style will be
+ used for all channels.
+ ecg_grids : list, optional
+ A list of integers specifying channels in which to plot ecg grids. May
+ also be set to 'all' for all channels. Major grids at 0.5mV, and minor
+ grids at 0.125mV. All channels to be plotted with grids must have
+ `sig_units` equal to 'uV', 'mV', or 'V'.
+ figsize : tuple, optional
+ Tuple pair specifying the width, and height of the figure. It is the
+ 'figsize' argument passed into matplotlib.pyplot's `figure` function.
+ return_fig : bool, optional
+ Whether the figure is to be returned as an output argument.
+
+ Returns
+ -------
+ figure : matplotlib figure, optional
+ The matplotlib figure generated. Only returned if the 'return_fig'
+ option is set to True.
+
+ Examples
+ --------
+ >>> record = wfdb.rdrecord('sample-data/100', sampto=3000)
+ >>> annotation = wfdb.rdann('sample-data/100', 'atr', sampto=3000)
+
+ >>> wfdb.plot_wfdb(record=record, annotation=annotation, plot_sym=True,
+ time_units='seconds', title='MIT-BIH Record 100',
+ figsize=(10,4), ecg_grids='all')
+
+ """
+ (signal, ann_samp, ann_sym, fs, sig_name,
+ sig_units) = get_wfdb_plot_items(record=record, annotation=annotation,
+ plot_sym=plot_sym)
+
+ return plot_items(signal=signal, ann_samp=ann_samp, ann_sym=ann_sym, fs=fs,
+ time_units=time_units, sig_name=sig_name,
+ sig_units=sig_units, title=title, sig_style=sig_style,
+ ann_style=ann_style, ecg_grids=ecg_grids,
+ figsize=figsize, return_fig=return_fig)
+
+
+def get_wfdb_plot_items(record, annotation, plot_sym):
+ """
+ Get items to plot from wfdb objects
+ """
+ # Get record attributes
+ if record:
+ if record.p_signal is not None:
+ signal = record.p_signal
+ elif record.d_signal is not None:
+ signal = record.d_signal
+ else:
+ raise ValueError('The record has no signal to plot')
+
+ fs = record.fs
+ sig_name = record.sig_name
+ sig_units = record.units
+ else:
+ signal, fs, sig_name, sig_units = 4 * [None]
+
+ # Get annotation attributes
+ if annotation:
+ # Get channels
+ all_chans = set(annotation.chan)
+
+ n_chans = max(all_chans) + 1
+
+ # Just one channel. Place content in one list index.
+ # if len(all_chans) == 1:
+ # ann_samp = annotation.chan[0]*[None] + [annotation.sample]
+ # if plot_sym:
+ # ann_sym = annotation.chan[0]*[None] + [annotation.symbol]
+ # else:
+ # ann_sym = None
+ # # Split annotations by channel
+ # else:
+
+ # Indices for each channel
+ chan_inds = n_chans * [np.empty(0)]
+
+ for chan in all_chans:
+ chan_inds[chan] = np.where(annotation.chan == chan)[0]
+
+ ann_samp = [annotation.sample[ci] for ci in chan_inds]
+
+ if plot_sym:
+ ann_sym = n_chans * [None]
+ for ch in all_chans:
+ ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]]
+ else:
+ ann_sym = None
+
+ # Try to get fs from annotation if not already in record
+ if fs is None:
+ fs = annotation.fs
+ else:
+ ann_samp = None
+ ann_sym = None
+
+ return signal, ann_samp, ann_sym, fs, sig_name, sig_units
+
+
+def plot_all_records(directory=''):
+ """
+ Plot all wfdb records in a directory (by finding header files), one at
+ a time, until the 'enter' key is pressed.
+
+ Parameters
+ ----------
+ directory : str, optional
+ The directory in which to search for WFDB records. Defaults to
+ current working directory.
+
+ """
+ directory = directory or os.getcwd()
+
+ headers = [f for f in os.listdir(directory) if os.path.isfile(
+ os.path.join(directory, f))]
+ headers = [f for f in headers if f.endswith('.hea')]
+
+ records = [h.split('.hea')[0] for h in headers]
+ records.sort()
+
+ for record_name in records:
+ record = rdrecord(os.path.join(directory, record_name))
+
+ plot_wfdb(record, title='Record - %s' % record.record_name)
+ input('Press enter to continue...')
diff --git a/wfdb/plot/plots.py b/wfdb/plot/plots.py
deleted file mode 100644
index e8b70ef4..00000000
--- a/wfdb/plot/plots.py
+++ /dev/null
@@ -1,408 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-import os
-
-from ..readwrite import records
-from ..readwrite import _headers
-from ..readwrite import _signals
-from ..readwrite import annotations
-
-
-# Plot a WFDB Record's signals
-# Optionally, overlay annotation locations
-def plotrec(record=None, title=None, annotation=None, timeunits='samples',
- sigstyle='', annstyle='r*', plotannsym=False, figsize=None, returnfig=False, ecggrids=[]):
- """ Subplot and label each channel of a WFDB Record.
- Optionally, subplot annotation locations over selected channels.
-
- Usage:
- plotrec(record=None, title = None, annotation = None, timeunits='samples', sigstyle='',
- annstyle='r*', figsize=None, returnfig = False, ecggrids=[])
-
- Input arguments:
- - record (required): A wfdb Record object. The p_signals attribute will be plotted.
- - title (default=None): A string containing the title of the graph.
- - annotation (default=None): A list of Annotation objects or numpy arrays. The locations of the Annotation
- objects' 'sample' attribute, or the locations of the numpy arrays' values, will be overlaid on the signals.
- The list index of the annotation item corresponds to the signal channel that each annotation set will be
- plotted on. For channels without annotations to plot, put None in the list. This argument may also be just
- an Annotation object or numpy array, which will be plotted over channel 0.
- - timeunits (default='samples'): String specifying the x axis unit.
- Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
- - sigstyle (default='r*'): String, or list of strings, specifying the styling of the matplotlib plot for the signals.
- If 'sigstyle' is a string, each channel will have the same style. If it is a list, each channel's style will
- correspond to the list element. ie. sigtype=['r','b','k']
- - annstyle (default='r*'): String, or list of strings, specifying the styling of the matplotlib plot for the annotations.
- If 'annstyle' is a string, each channel will have the same style. If it is a list, each channel's style will
- correspond to the list element.
- - plotannsym (default=False): Specifies whether to plot the annotation symbols at their locations.
- - figsize (default=None): Tuple pair specifying the width, and height of the figure. Same as the 'figsize' argument
- passed into matplotlib.pyplot's figure() function.
- - returnfig (default=False): Specifies whether the figure is to be returned as an output argument
- - ecggrids (default=[]): List of integers specifying channels in which to plot ecg grids. May be set to [] for
- no channels, or 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be
- plotted with grids must have units equal to 'uV', 'mV', or 'V'.
-
- Output argument:
- - figure: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True.
-
- Example Usage:
- import wfdb
- record = wfdb.rdsamp('sampledata/100', sampto = 3000)
- annotation = wfdb.rdann('sampledata/100', 'atr', sampto = 3000)
-
- wfdb.plotrec(record, annotation = annotation, title='Record 100 from MIT-BIH Arrhythmia Database',
- timeunits = 'seconds', figsize = (10,4), ecggrids = 'all')
- """
-
- # Check the validity of items used to make the plot
- # Return the x axis time values to plot for the record (and annotation if any)
- t, tann, annplot = checkplotitems(record, title, annotation, timeunits, sigstyle, annstyle)
-
- siglen, nsig = record.p_signals.shape
-
- # Expand list styles
- if isinstance(sigstyle, str):
- sigstyle = [sigstyle]*record.nsig
- else:
- if len(sigstyle) < record.nsig:
- sigstyle = sigstyle+['']*(record.nsig-len(sigstyle))
- if isinstance(annstyle, str):
- annstyle = [annstyle]*record.nsig
- else:
- if len(annstyle) < record.nsig:
- annstyle = annstyle+['r*']*(record.nsig-len(annstyle))
-
- # Expand ecg grid channels
- if ecggrids == 'all':
- ecggrids = range(0, record.nsig)
-
- # Create the plot
- fig=plt.figure(figsize=figsize)
-
- for ch in range(nsig):
- # Plot signal channel
- ax = fig.add_subplot(nsig, 1, ch+1)
- ax.plot(t, record.p_signals[:,ch], sigstyle[ch], zorder=3)
-
- if (title is not None) and (ch==0):
- plt.title(title)
-
- # Plot annotation if specified
- if annplot[ch] is not None:
- ax.plot(tann[ch], record.p_signals[annplot[ch], ch], annstyle[ch])
- # Plot the annotation symbols if specified
- if plotannsym:
- for i, s in enumerate(annotation.symbol):
- ax.annotate(s, (tann[ch][i], record.p_signals[annplot[ch], ch][i]))
-
- # Axis Labels
- if timeunits == 'samples':
- plt.xlabel('index/sample')
- else:
- plt.xlabel('time/'+timeunits[:-1])
-
- if record.signame[ch] is not None:
- chanlabel=record.signame[ch]
- else:
- chanlabel='channel'
- if record.units[ch] is not None:
- unitlabel=record.units[ch]
- else:
- unitlabel='NU'
- plt.ylabel(chanlabel+"/"+unitlabel)
-
- # Show standard ecg grids if specified.
- if ch in ecggrids:
-
- auto_xlims = ax.get_xlim()
- auto_ylims= ax.get_ylim()
-
- major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y = calc_ecg_grids(
- auto_ylims[0], auto_ylims[1], record.units[ch], record.fs, auto_xlims[1], timeunits)
-
- min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)
- min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)
-
- for tick in minor_ticks_x:
- ax.plot([tick, tick], [min_y, max_y], c='#ededed', marker='|', zorder=1)
- for tick in major_ticks_x:
- ax.plot([tick, tick], [min_y, max_y], c='#bababa', marker='|', zorder=2)
- for tick in minor_ticks_y:
- ax.plot([min_x, max_x], [tick, tick], c='#ededed', marker='_', zorder=1)
- for tick in major_ticks_y:
- ax.plot([min_x, max_x], [tick, tick], c='#bababa', marker='_', zorder=2)
-
- # Plotting the lines changes the graph. Set the limits back
- ax.set_xlim(auto_xlims)
- ax.set_ylim(auto_ylims)
-
- plt.show(fig)
-
- # Return the figure if requested
- if returnfig:
- return fig
-
-# Calculate tick intervals for ecg grids
-def calc_ecg_grids(minsig, maxsig, units, fs, maxt, timeunits):
-
- # 5mm 0.2s major grids, 0.04s minor grids
- # 0.5mV major grids, 0.125 minor grids
- # 10 mm is equal to 1mV in voltage.
-
- # Get the grid interval of the x axis
- if timeunits == 'samples':
- majorx = 0.2*fs
- minorx = 0.04*fs
- elif timeunits == 'seconds':
- majorx = 0.2
- minorx = 0.04
- elif timeunits == 'minutes':
- majorx = 0.2/60
- minorx = 0.04/60
- elif timeunits == 'hours':
- majorx = 0.2/3600
- minorx = 0.04/3600
-
- # Get the grid interval of the y axis
- if units.lower()=='uv':
- majory = 500
- minory = 125
- elif units.lower()=='mv':
- majory = 0.5
- minory = 0.125
- elif units.lower()=='v':
- majory = 0.0005
- minory = 0.000125
- else:
- raise ValueError('Signal units must be uV, mV, or V to plot the ECG grid.')
-
-
- major_ticks_x = np.arange(0, _signals.upround(maxt, majorx)+0.0001, majorx)
- minor_ticks_x = np.arange(0, _signals.upround(maxt, majorx)+0.0001, minorx)
-
- major_ticks_y = np.arange(_signals.downround(minsig, majory), _signals.upround(maxsig, majory)+0.0001, majory)
- minor_ticks_y = np.arange(_signals.downround(minsig, majory), _signals.upround(maxsig, majory)+0.0001, minory)
-
- return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y)
-
-# Check the validity of items used to make the plot
-# Return the x axis time values to plot for the record (and time and values for annotation if any)
-def checkplotitems(record, title, annotation, timeunits, sigstyle, annstyle):
-
- # signals
- if not isinstance(record, records.Record):
- raise TypeError("The 'record' argument must be a valid wfdb.Record object")
- if not isinstance(record.p_signals, np.ndarray) or record.p_signals.ndim != 2:
- raise TypeError("The plotted signal 'record.p_signals' must be a 2d numpy array")
-
- siglen, nsig = record.p_signals.shape
-
- # fs and timeunits
- allowedtimes = ['samples', 'seconds', 'minutes', 'hours']
- if timeunits not in allowedtimes:
- raise ValueError("The 'timeunits' field must be one of the following: ", allowedtimes)
- # Get x axis values. fs must be valid when plotting time
- if timeunits == 'samples':
- t = np.linspace(0, siglen-1, siglen)
- else:
- if not isinstance(record.fs, _headers.floattypes):
- raise TypeError("The 'fs' field must be a number")
-
- if timeunits == 'seconds':
- t = np.linspace(0, siglen-1, siglen)/record.fs
- elif timeunits == 'minutes':
- t = np.linspace(0, siglen-1, siglen)/record.fs/60
- else:
- t = np.linspace(0, siglen-1, siglen)/record.fs/3600
-
- # units
- if record.units is None:
- record.units = ['NU']*nsig
- else:
- if not isinstance(record.units, list) or len(record.units)!= nsig:
- raise ValueError("The 'units' parameter must be a list of strings with length equal to the number of signal channels")
- for ch in range(nsig):
- if record.units[ch] is None:
- record.units[ch] = 'NU'
-
- # signame
- if record.signame is None:
- record.signame = ['ch'+str(ch) for ch in range(1, nsig+1)]
- else:
- if not isinstance(record.signame, list) or len(record.signame)!= nsig:
- raise ValueError("The 'signame' parameter must be a list of strings, with length equal to the number of signal channels")
-
- # title
- if title is not None and not isinstance(title, str):
- raise TypeError("The 'title' field must be a string")
-
- # signal line style
- if isinstance(sigstyle, str):
- pass
- elif isinstance(sigstyle, list):
- if len(sigstyle) > record.nsig:
- raise ValueError("The 'sigstyle' list cannot have more elements than the number of record channels")
- else:
- raise TypeError("The 'sigstyle' field must be a string or a list of strings")
-
- # annotation plot style
- if isinstance(annstyle, str):
- pass
- elif isinstance(annstyle, list):
- if len(annstyle) > record.nsig:
- raise ValueError("The 'annstyle' list cannot have more elements than the number of record channels")
- else:
- raise TypeError("The 'annstyle' field must be a string or a list of strings")
-
-
- # Annotations if any
- if annotation is not None:
-
- # The output list of numpy arrays (or Nones) to plot
- annplot = [None]*record.nsig
-
- # Move single channel annotations to channel 0
- if isinstance(annotation, annotations.Annotation):
- annplot[0] = annotation.sample
- elif isinstance(annotation, np.ndarray):
- annplot[0] = annotation
- # Ready list.
- elif isinstance(annotation, list):
- if len(annotation) > record.nsig:
- raise ValueError("The number of annotation series to plot cannot be more than the number of channels")
- if len(annotation) < record.nsig:
- annotation = annotation+[None]*(record.nsig-len(annotation))
- # Check elements. Copy over to new list.
- for ch in range(record.nsig):
- if isinstance(annotation[ch], annotations.Annotation):
- annplot[ch] = annotation[ch].sample
- elif isinstance(annotation[ch], np.ndarray):
- annplot[ch] = annotation[ch]
- elif annotation[ch] is None:
- pass
- else:
- raise TypeError("The 'annotation' argument must be a wfdb.Annotation object, a numpy array, None, or a list of these data types")
- else:
- raise TypeError("The 'annotation' argument must be a wfdb.Annotation object, a numpy array, None, or a list of these data types")
-
- # The annotation locations to plot
- tann = [None]*record.nsig
-
- for ch in range(record.nsig):
- if annplot[ch] is None:
- continue
- if timeunits == 'samples':
- tann[ch] = annplot[ch]
- elif timeunits == 'seconds':
- tann[ch] = annplot[ch]/float(record.fs)
- elif timeunits == 'minutes':
- tann[ch] = annplot[ch]/float(record.fs)/60
- else:
- tann[ch] = annplot[ch]/float(record.fs)/3600
- else:
- tann = None
- annplot = [None]*record.nsig
-
- # tann is the sample values to plot for each annotation series
- return (t, tann, annplot)
-
-
-
-# Plot the sample locations of a WFDB annotation on a new figure
-def plotann(annotation, title = None, timeunits = 'samples', returnfig = False):
- """ Plot sample locations of an Annotation object.
-
- Usage: plotann(annotation, title = None, timeunits = 'samples', returnfig = False)
-
- Input arguments:
- - annotation (required): An Annotation object. The sample attribute locations will be overlaid on the signal.
- - title (default=None): A string containing the title of the graph.
- - timeunits (default='samples'): String specifying the x axis unit.
- Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'.
- - returnfig (default=False): Specifies whether the figure is to be returned as an output argument
-
- Output argument:
- - figure: The matplotlib figure generated. Only returned if the 'returnfig' option is set to True.
-
- Note: The plotrec function is useful for plotting annotations on top of signal waveforms.
-
- Example Usage:
- import wfdb
- annotation = wfdb.rdann('sampledata/100', 'atr', sampfrom = 100000, sampto = 110000)
- annotation.fs = 360
- wfdb.plotann(annotation, timeunits = 'minutes')
- """
-
- # Check the validity of items used to make the plot
- # Get the x axis annotation values to plot
- plotvals = checkannplotitems(annotation, title, timeunits)
-
- # Create the plot
- fig=plt.figure()
-
- plt.plot(plotvals, np.zeros(len(plotvals)), 'r+')
-
- if title is not None:
- plt.title(title)
-
- # Axis Labels
- if timeunits == 'samples':
- plt.xlabel('index/sample')
- else:
- plt.xlabel('time/'+timeunits[:-1])
-
- plt.show(fig)
-
- # Return the figure if requested
- if returnfig:
- return fig
-
-# Check the validity of items used to make the annotation plot
-def checkannplotitems(annotation, title, timeunits):
-
- # signals
- if not isinstance(annotation, annotations.Annotation):
- raise TypeError("The 'annotation' field must be a 'wfdb.Annotation' object")
-
- # fs and timeunits
- allowedtimes = ['samples', 'seconds', 'minutes', 'hours']
- if timeunits not in allowedtimes:
- raise ValueError("The 'timeunits' field must be one of the following: ", allowedtimes)
-
- # fs must be valid when plotting time
- if timeunits != 'samples':
- if not isinstance(annotation.fs, _headers.floattypes):
- raise Exception("In order to plot time units, the Annotation object must have a valid 'fs' attribute")
-
- # Get x axis values to plot
- if timeunits == 'samples':
- plotvals = annotation.sample
- elif timeunits == 'seconds':
- plotvals = annotation.sample/float(annotation.fs)
- elif timeunits == 'minutes':
- plotvals = annotation.sample/float(annotation.fs*60)
- elif timeunits == 'hours':
- plotvals = annotation.sample/float(annotation.fs*3600)
-
- # title
- if title is not None and not isinstance(title, str):
- raise TypeError("The 'title' field must be a string")
-
- return plotvals
-
-
-def plot_records(directory=os.getcwd()):
- """
- Plot all wfdb records in a directory (by finding header files)
- """
- filelist = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
- filelist = [f for f in filelist if f.endswith('.hea')]
- recordlist = [f.split('.hea')[0] for f in filelist]
- recordlist.sort()
-
- for record_name in recordlist:
- record = records.rdsamp(record_name)
-
- plotrec(record, title='Record: %s' % record.recordname)
- input('Press enter to continue...')
diff --git a/wfdb/processing/__init__.py b/wfdb/processing/__init__.py
index 7a319c61..3be16ad9 100644
--- a/wfdb/processing/__init__.py
+++ b/wfdb/processing/__init__.py
@@ -1,4 +1,6 @@
-from .basic import resample_ann, resample_sig, resample_singlechan, resample_multichan, normalize
-from .gqrs import gqrs_detect
+from .basic import (resample_ann, resample_sig, resample_singlechan,
+ resample_multichan, normalize_bound, get_filter_gain)
+from .evaluate import Comparitor, compare_annotations
from .hr import compute_hr
-from .peaks import find_peaks, correct_peaks
+from .peaks import find_peaks, find_local_peaks, correct_peaks
+from .qrs import XQRS, xqrs_detect, gqrs_detect
diff --git a/wfdb/processing/basic.py b/wfdb/processing/basic.py
index a5eda8fe..05ce2ae1 100644
--- a/wfdb/processing/basic.py
+++ b/wfdb/processing/basic.py
@@ -1,30 +1,41 @@
-import numpy
+import numpy as np
from scipy import signal
-from wfdb import Annotation
+from ..io.annotation import Annotation
-def resample_ann(tt, sample):
- # tt: numpy.array as returned by signal.resample
- # sample: numpy.array containing indices of annotations (Annotation.sample)
+def resample_ann(resampled_t, ann_sample):
+ """
+ Compute the new annotation indices
- # Compute the new annotation indices
+ Parameters
+ ----------
+ resampled_t : numpy array
+ Array of signal locations as returned by scipy.signal.resample
+ ann_sample : numpy array
+ Array of annotation locations
- tmp = numpy.zeros(len(tt), dtype='int16')
+ Returns
+ -------
+ resampled_ann_sample : numpy array
+ Array of resampled annotation locations
+
+ """
+ tmp = np.zeros(len(resampled_t), dtype='int16')
j = 0
- tprec = tt[j]
- for i, v in enumerate(sample):
+ tprec = resampled_t[j]
+ for i, v in enumerate(ann_sample):
while True:
d = False
if v < tprec:
j -= 1
- tprec = tt[j]
-
- if j+1 == len(tt):
+ tprec = resampled_t[j]
+
+ if j+1 == len(resampled_t):
tmp[j] += 1
break
-
- tnow = tt[j+1]
+
+ tnow = resampled_t[j+1]
if tprec <= v and v <= tnow:
if v-tprec < tnow-v:
tmp[j] += 1
@@ -35,92 +46,197 @@ def resample_ann(tt, sample):
tprec = tnow
if d:
break
-
- idx = numpy.where(tmp>0)[0].astype('int64')
+
+ idx = np.where(tmp>0)[0].astype('int64')
res = []
for i in idx:
for j in range(tmp[i]):
res.append(i)
- assert len(res) == len(sample)
- return numpy.asarray(res, dtype='int64')
+ assert len(res) == len(ann_sample)
+
+ return np.asarray(res, dtype='int64')
def resample_sig(x, fs, fs_target):
- # x: a numpy.array containing the signal
- # fs: the current frequency
- # fs_target: the target frequency
+ """
+ Resample a signal to a different frequency.
- # Resample a signal
+ Parameters
+ ----------
+ x : numpy array
+ Array containing the signal
+ fs : int, or float
+ The original sampling frequency
+ fs_target : int, or float
+ The target frequency
- t = numpy.arange(x.shape[0]).astype('float64')
+ Returns
+ -------
+ resampled_x : numpy array
+ Array of the resampled signal values
+ resampled_t : numpy array
+ Array of the resampled signal locations
+
+ """
+
+ t = np.arange(x.shape[0]).astype('float64')
if fs == fs_target:
return x, t
new_length = int(x.shape[0]*fs_target/fs)
- xx, tt = signal.resample(x, num=new_length, t=t)
- assert xx.shape == tt.shape and xx.shape[0] == new_length
- assert numpy.all(numpy.diff(tt) > 0)
- return xx, tt
-
-
-def resample_singlechan(x, ann, fs, fs_target):
- # x: a numpy.array containing the signal
- # ann: an Annotation object
- # fs: the current frequency
- # fs_target: the target frequency
+ resampled_x, resampled_t = signal.resample(x, num=new_length, t=t)
+ assert resampled_x.shape == resampled_t.shape and resampled_x.shape[0] == new_length
+ assert np.all(np.diff(resampled_t) > 0)
- # Resample a single-channel signal with its annotations
+ return resampled_x, resampled_t
- xx, tt = resample_sig(x, fs, fs_target)
- new_sample = resample_ann(tt, ann.sample)
+def resample_singlechan(x, ann, fs, fs_target):
+ """
+ Resample a single-channel signal with its annotations
+
+ Parameters
+ ----------
+ x: numpy array
+ The signal array
+ ann : wfdb Annotation
+ The wfdb annotation object
+ fs : int, or float
+ The original frequency
+ fs_target : int, or float
+ The target frequency
+
+ Returns
+ -------
+ resampled_x : numpy array
+ Array of the resampled signal values
+ resampled_ann : wfdb Annotation
+ Annotation containing resampled annotation locations
+
+ """
+
+ resampled_x, resampled_t = resample_sig(x, fs, fs_target)
+
+ new_sample = resample_ann(resampled_t, ann.sample)
assert ann.sample.shape == new_sample.shape
- new_ann = Annotation(ann.recordname, ann.extension, new_sample, ann.symbol, ann.num, ann.subtype, ann.chan, ann.aux_note, ann.fs)
- return xx, new_ann
-
+ resampled_ann = Annotation(ann.record_name, ann.extension, new_sample,
+ ann.symbol, ann.num, ann.subtype, ann.chan, ann.aux_note, fs_target)
-def resample_multichan(xs, ann, fs, fs_target, resamp_ann_chan=0):
- # xs: a numpy.ndarray containing the signals as returned by wfdb.srdsamp
- # ann: an Annotation object
- # fs: the current frequency
- # fs_target: the target frequency
- # resample_ann_channel: the signal channel that is used to compute new annotation indices
+ return resampled_x, resampled_ann
- # Resample multiple channels with their annotations
+def resample_multichan(xs, ann, fs, fs_target, resamp_ann_chan=0):
+ """
+ Resample multiple channels with their annotations
+
+ Parameters
+ ----------
+ xs: numpy array
+ The signal array
+ ann : wfdb Annotation
+ The wfdb annotation object
+ fs : int, or float
+ The original frequency
+ fs_target : int, or float
+ The target frequency
+ resample_ann_channel : int, optional
+ The signal channel used to compute new annotation indices
+
+ Returns
+ -------
+ resampled_xs : numpy array
+ Array of the resampled signal values
+ resampled_ann : wfdb Annotation
+ Annotation containing resampled annotation locations
+
+ """
assert resamp_ann_chan < xs.shape[1]
lx = []
lt = None
for chan in range(xs.shape[1]):
- xx, tt = resample_sig(xs[:, chan], fs, fs_target)
- lx.append(xx)
+ resampled_x, resampled_t = resample_sig(xs[:, chan], fs, fs_target)
+ lx.append(resampled_x)
if chan == resamp_ann_chan:
- lt = tt
+ lt = resampled_t
new_sample = resample_ann(lt, ann.sample)
assert ann.sample.shape == new_sample.shape
- new_ann = Annotation(ann.recordname, ann.extension, new_sample, ann.symbol, ann.num, ann.subtype, ann.chan, ann.aux_note, ann.fs)
- return numpy.column_stack(lx), new_ann
+ resampled_ann = Annotation(ann.record_name, ann.extension, new_sample, ann.symbol,
+ ann.num, ann.subtype, ann.chan, ann.aux_note, fs_target)
+
+ return np.column_stack(lx), resampled_ann
-def normalize(x, lb=0, ub=1):
- # lb: Lower bound
- # ub: Upper bound
+def normalize_bound(sig, lb=0, ub=1):
+ """
+ Normalize a signal between the lower and upper bound
- # Resizes a signal between the lower and upper bound
+ Parameters
+ ----------
+ sig : numpy array
+ Original signal to be normalized
+ lb : int, or float
+ Lower bound
+ ub : int, or float
+ Upper bound
+
+ Returns
+ -------
+ x_normalized : numpy array
+ Normalized signal
+
+ """
mid = ub - (ub - lb) / 2
- min_v = numpy.min(x)
- max_v = numpy.max(x)
+ min_v = np.min(sig)
+ max_v = np.max(sig)
mid_v = max_v - (max_v - min_v) / 2
coef = (ub - lb) / (max_v - min_v)
- return x * coef - (mid_v * coef) + mid
-
-
-def smooth(x, window_size):
- box = numpy.ones(window_size)/window_size
- return numpy.convolve(x, box, mode='same')
+ return sig * coef - (mid_v * coef) + mid
+
+
+def smooth(sig, window_size):
+ """
+ Apply a uniform moving average filter to a signal
+
+ Parameters
+ ----------
+ sig : numpy array
+ The signal to smooth.
+ window_size : int
+ The width of the moving average filter.
+
+ """
+ box = np.ones(window_size)/window_size
+ return np.convolve(sig, box, mode='same')
+
+
+def get_filter_gain(b, a, f_gain, fs):
+ """
+ Given filter coefficients, return the gain at a particular
+ frequency.
+
+ Parameters
+ ----------
+ b : list
+ List of linear filter b coefficients
+ a : list
+ List of linear filter a coefficients
+ f_gain : int or float
+ The frequency at which to calculate the gain
+ fs : int or float
+ The sampling frequency of the system
+
+ """
+ # Save the passband gain
+ w, h = signal.freqz(b, a)
+ w_gain = f_gain * 2 * np.pi / fs
+
+ ind = np.where(w >= w_gain)[0][0]
+ gain = abs(h[ind])
+
+ return gain
diff --git a/wfdb/processing/evaluate.py b/wfdb/processing/evaluate.py
new file mode 100644
index 00000000..69ce7cd4
--- /dev/null
+++ b/wfdb/processing/evaluate.py
@@ -0,0 +1,339 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+class Comparitor(object):
+ """
+ The class to implement and hold comparisons between two sets of
+ annotations.
+
+ See methods `print_summary` and `plot`.
+
+ Examples
+ --------
+ >>> import wfdb
+ >>> from wfdb import processing
+
+ >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
+ >>> ann_ref = wfdb.rdann('sample-data/100','atr')
+ >>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
+ >>> xqrs.detect()
+
+ >>> comparitor = processing.Comparitor(ann_ref.sample[1:],
+ xqrs.qrs_inds,
+ int(0.1 * fields['fs']),
+ sig[:,0])
+ >>> comparitor.compare()
+ >>> comparitor.print_summary()
+ >>> comparitor.plot()
+
+ """
+ def __init__(self, ref_sample, test_sample, window_width, signal=None):
+ """
+ Parameters
+ ----------
+ ref_sample : numpy array
+ An array of the reference sample locations
+ test_sample : numpy array
+ An array of the comparison sample locations
+ window_width : int
+ The width of the window
+ signal : 1d numpy array, optional
+ The signal array the annotation samples are labelling. Only used
+ for plotting.
+ """
+ if min(np.diff(ref_sample)) < 0 or min(np.diff(test_sample)) < 0:
+ raise ValueError(('The sample locations must be monotonically'
+ + ' increasing'))
+
+ self.ref_sample = ref_sample
+ self.test_sample = test_sample
+ self.n_ref = len(ref_sample)
+ self.n_test = len(test_sample)
+ self.window_width = window_width
+
+ # The matching test sample number for each reference annotation.
+ # -1 for indices with no match
+ self.matching_sample_nums = -1 * np.ones(self.n_ref, dtype='int')
+
+ self.signal = signal
+ # TODO: rdann return annotations.where
+
+ def _calc_stats(self):
+ """
+ Calculate performance statistics after the two sets of annotations
+ are compared.
+
+ Example:
+ -------------------
+ ref=500 test=480
+ { 30 { 470 } 10 }
+ -------------------
+
+ tp = 470
+ fp = 10
+ fn = 30
+
+ specificity = 470 / 500
+ positive_predictivity = 470 / 480
+ false_positive_rate = 10 / 480
+
+ """
+ # Reference annotation indices that were detected
+ self.matched_ref_inds = np.where(self.matching_sample_nums != -1)[0]
+ # Reference annotation indices that were missed
+ self.unmatched_ref_inds = np.where(self.matching_sample_nums == -1)[0]
+ # Test annotation indices that were matched to a reference annotation
+ self.matched_test_inds = self.matching_sample_nums[
+ self.matching_sample_nums != -1]
+ # Test annotation indices that were unmatched to a reference annotation
+ self.unmatched_test_inds = np.setdiff1d(np.array(range(self.n_test)),
+ self.matched_test_inds, assume_unique=True)
+
+ # Sample numbers that were matched and unmatched
+ self.matched_ref_sample = self.ref_sample[self.matched_ref_inds]
+ self.unmatched_ref_sample = self.ref_sample[self.unmatched_ref_inds]
+ self.matched_test_sample = self.test_sample[self.matched_test_inds]
+ self.unmatched_test_sample = self.test_sample[self.unmatched_test_inds]
+
+ # True positives = matched reference samples
+ self.tp = len(self.matched_ref_inds)
+ # False positives = extra test samples not matched
+ self.fp = self.n_test - self.tp
+ # False negatives = undetected reference samples
+ self.fn = self.n_ref - self.tp
+ # No tn attribute
+
+ self.specificity = float(self.tp) / self.n_ref
+ self.positive_predictivity = float(self.tp) / self.n_test
+ self.false_positive_rate = float(self.fp) / self.n_test
+
+
+ def compare(self):
+ """
+ Main comparison function
+ """
+ test_samp_num = 0
+ ref_samp_num = 0
+
+ # Iterate through the reference sample numbers
+ while ref_samp_num < self.n_ref and test_samp_num < self.n_test:
+
+ # Get the closest testing sample number for this reference sample
+ closest_samp_num, smallest_samp_diff = (
+ self._get_closest_samp_num(ref_samp_num, test_samp_num))
+ # Get the closest testing sample number for the next reference
+ # sample. This doesn't need to be called for the last index.
+ if ref_samp_num < self.n_ref - 1:
+ closest_samp_num_next, smallest_samp_diff_next = (
+ self._get_closest_samp_num(ref_samp_num + 1, test_samp_num))
+ else:
+ # Set non-matching value if there is no next reference sample
+ # to compete for the test sample
+ closest_samp_num_next = -1
+
+ # Found a contested test sample number. Decide which reference
+ # sample it belongs to.
+ if closest_samp_num == closest_samp_num_next:
+ # If the sample is closer to the next reference sample,
+ # assign it to the next reference sample.
+ if smallest_samp_diff_next < smallest_samp_diff:
+ # Get the next closest sample for this reference sample.
+ # Can this be empty? Need to catch case where nothing left?
+ closest_samp_num, smallest_samp_diff = (
+ self._get_closest_samp_num(ref_samp_num, test_samp_num))
+
+ # If no clash, it is straightforward.
+
+ # Assign the reference-test pair if close enough
+ if smallest_samp_diff < self.window_width:
+ self.matching_sample_nums[ref_samp_num] = closest_samp_num
+
+ ref_samp_num += 1
+ test_samp_num = closest_samp_num + 1
+
+ self._calc_stats()
+
+
+ def _get_closest_samp_num(self, ref_samp_num, start_test_samp_num):
+ """
+ Return the closest testing sample number for the given reference
+ sample number. Limit the search from start_test_samp_num.
+ """
+
+ if start_test_samp_num >= self.n_test:
+ raise ValueError('Invalid starting test sample number.')
+
+ ref_samp = self.ref_sample[ref_samp_num]
+ test_samp = self.test_sample[start_test_samp_num]
+ samp_diff = ref_samp - test_samp
+
+ # Initialize running parameters
+ closest_samp_num = start_test_samp_num
+ smallest_samp_diff = abs(samp_diff)
+
+ # Iterate through the testing samples
+ for test_samp_num in range(start_test_samp_num, self.n_test):
+ test_samp = self.test_sample[test_samp_num]
+ samp_diff = ref_samp - test_samp
+ abs_samp_diff = abs(samp_diff)
+
+ # Found a better match
+ if abs(samp_diff) < smallest_samp_diff:
+ closest_samp_num = test_samp_num
+ smallest_samp_diff = abs_samp_diff
+
+ # Stop iterating when the ref sample is first passed or reached
+ if samp_diff <= 0:
+ break
+
+ return closest_samp_num, smallest_samp_diff
+
+ def print_summary(self):
+ """
+ Print summary metrics of the annotation comparisons.
+ """
+ # True positives = matched reference samples
+ self.tp = len(self.matched_ref_inds)
+ # False positives = extra test samples not matched
+ self.fp = self.n_test - self.tp
+ # False negatives = undetected reference samples
+ self.fn = self.n_ref - self.tp
+ # No tn attribute
+
+ self.specificity = self.tp / self.n_ref
+ self.positive_predictivity = self.tp / self.n_test
+ self.false_positive_rate = self.fp / self.n_test
+
+ print('%d reference annotations, %d test annotations\n'
+ % (self.n_ref, self.n_test))
+ print('True Positives (matched samples): %d' % self.tp)
+ print('False Positives (unmatched test samples): %d' % self.fp)
+ print('False Negatives (unmatched reference samples): %d\n' % self.fn)
+
+ print('Specificity: %.4f (%d/%d)'
+ % (self.specificity, self.tp, self.n_ref))
+ print('Positive Predictivity: %.4f (%d/%d)'
+ % (self.positive_predictivity, self.tp, self.n_test))
+ print('False Positive Rate: %.4f (%d/%d)'
+ % (self.false_positive_rate, self.fp, self.n_test))
+
+
+ def plot(self, sig_style='', title=None, figsize=None,
+ return_fig=False):
+ """
+ Plot the comparison of two sets of annotations, possibly
+ overlaid on their original signal.
+
+ Parameters
+ ----------
+ sig_style : str, optional
+ The matplotlib style of the signal
+ title : str, optional
+ The title of the plot
+ figsize: tuple, optional
+ Tuple pair specifying the width, and height of the figure.
+ It is the 'figsize' argument passed into matplotlib.pyplot's
+ `figure` function.
+ return_fig : bool, optional
+ Whether the figure is to be returned as an output argument.
+
+ """
+ fig = plt.figure(figsize=figsize)
+ ax = fig.add_subplot(1, 1, 1)
+
+ legend = ['Signal',
+ 'Matched Reference Annotations (%d/%d)' % (self.tp, self.n_ref),
+ 'Unmatched Reference Annotations (%d/%d)' % (self.fn, self.n_ref),
+ 'Matched Test Annotations (%d/%d)' % (self.tp, self.n_test),
+ 'Unmatched Test Annotations (%d/%d)' % (self.fp, self.n_test)
+ ]
+
+ # Plot the signal if any
+ if self.signal is not None:
+ ax.plot(self.signal, sig_style)
+
+ # Plot reference annotations
+ ax.plot(self.matched_ref_sample,
+ self.signal[self.matched_ref_sample], 'ko')
+ ax.plot(self.unmatched_ref_sample,
+ self.signal[self.unmatched_ref_sample], 'ko',
+ fillstyle='none')
+ # Plot test annotations
+ ax.plot(self.matched_test_sample,
+ self.signal[self.matched_test_sample], 'g+')
+ ax.plot(self.unmatched_test_sample,
+ self.signal[self.unmatched_test_sample], 'rx')
+
+ ax.legend(legend)
+
+ # Just plot annotations
+ else:
+ # Plot reference annotations
+ ax.plot(self.matched_ref_sample, np.ones(self.tp), 'ko')
+ ax.plot(self.unmatched_ref_sample, np.ones(self.fn), 'ko',
+ fillstyle='none')
+ # Plot test annotations
+ ax.plot(self.matched_test_sample, 0.5 * np.ones(self.tp), 'g+')
+ ax.plot(self.unmatched_test_sample, 0.5 * np.ones(self.fp), 'rx')
+ ax.legend(legend[1:])
+
+ if title:
+ ax.set_title(title)
+
+ ax.set_xlabel('time/sample')
+
+ fig.show()
+
+ if return_fig:
+ return fig, ax
+
+
+def compare_annotations(ref_sample, test_sample, window_width, signal=None):
+ """
+ Compare a set of reference annotation locations against a set of
+ test annotation locations.
+
+ See the Comparitor class docstring for more information.
+
+ Parameters
+ ----------
+ ref_sample : 1d numpy array
+ Array of reference sample locations
+ test_sample : 1d numpy array
+ Array of test sample locations to compare
+ window_width : int
+ The maximum absolute difference in sample numbers that is
+ permitted for matching annotations.
+ signal : 1d numpy array, optional
+ The original signal of the two annotations. Only used for
+ plotting.
+
+ Returns
+ -------
+ comparitor : Comparitor object
+ Object containing parameters about the two sets of annotations
+
+ Examples
+ --------
+ >>> import wfdb
+ >>> from wfdb import processing
+
+ >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
+ >>> ann_ref = wfdb.rdann('sample-data/100','atr')
+ >>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
+ >>> xqrs.detect()
+
+ >>> comparitor = processing.compare_annotations(ann_ref.sample[1:],
+ xqrs.qrs_inds,
+ int(0.1 * fields['fs']),
+ sig[:,0])
+ >>> comparitor.print_summary()
+ >>> comparitor.plot()
+
+ """
+ comparitor = Comparitor(ref_sample=ref_sample, test_sample=test_sample,
+ window_width=window_width, signal=signal)
+ comparitor.compare()
+
+ return comparitor
diff --git a/wfdb/processing/gqrs.py b/wfdb/processing/gqrs.py
deleted file mode 100644
index 0dcb8478..00000000
--- a/wfdb/processing/gqrs.py
+++ /dev/null
@@ -1,494 +0,0 @@
-import numpy
-import copy
-
-def time_to_sample_number(seconds, frequency):
- return seconds * frequency + 0.5
-
-
-class Conf(object):
- def __init__(self, freq, gain,
- hr=75,
- RRdelta=0.2, RRmin=0.28, RRmax=2.4,
- QS=0.07, QT=0.35,
- RTmin=0.25, RTmax=0.33,
- QRSa=750, QRSamin=130,
- thresh=1.0):
- self.freq = freq
-
- self.sps = int(time_to_sample_number(1, freq))
- self.spm = int(time_to_sample_number(60, freq))
-
- self.hr = hr
- self.RR = 60.0 / self.hr
- self.RRdelta = RRdelta
- self.RRmin = RRmin
- self.RRmax = RRmax
- self.QS = QS
- self.QT = QT
- self.RTmin = RTmin
- self.RTmax = RTmax
- self.QRSa = QRSa
- self.QRSamin = QRSamin
- self.thresh = thresh
-
- self._NORMAL = 1 # normal beat
- self._ARFCT = 16 # isolated QRS-like artifact
- self._NOTE = 22 # comment annotation
- self._TWAVE = 27 # T-wave peak
- self._NPEAKS = 64 # number of peaks buffered (per signal)
- self._BUFLN = 32768 # must be a power of 2, see qf()
-
- self.rrmean = int(self.RR * self.sps)
- self.rrdev = int(self.RRdelta * self.sps)
- self.rrmin = int(self.RRmin * self.sps)
- self.rrmax = int(self.RRmax * self.sps)
-
- self.rrinc = int(self.rrmean / 40)
- if self.rrinc < 1:
- self.rrinc = 1
-
- self.dt = int(self.QS * self.sps / 4)
- if self.dt < 1:
- self.dt = 1
- print("Warning: sampling rate may be too low!")
-
- self.rtmin = int(self.RTmin * self.sps)
- self.rtmean = int(0.75 * self.QT * self.sps)
- self.rtmax = int(self.RTmax * self.sps)
-
- dv = gain * self.QRSamin * 0.001
- self.pthr = int((self.thresh * dv * dv) / 6)
- self.qthr = self.pthr << 1
- self.pthmin = self.pthr >> 2
- self.qthmin = int((self.pthmin << 2) / 3)
- self.tamean = self.qthr # initial value for mean T-wave amplitude
-
- # Filter constants and thresholds.
- self.dt2 = 2 * self.dt
- self.dt3 = 3 * self.dt
- self.dt4 = 4 * self.dt
-
- self.smdt = self.dt
- self.v1norm = self.smdt * self.dt * 64
-
- self.smt = 0
- self.smt0 = 0 + self.smdt
-
-
-class Peak(object):
- def __init__(self, peak_time, peak_amp, peak_type):
- self.time = peak_time
- self.amp = peak_amp
- self.type = peak_type
- self.next_peak = None
- self.prev_peak = None
-
-
-class Annotation(object):
- def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
- self.time = ann_time
- self.type = ann_type
- self.subtype = ann_subtype
- self.num = ann_num
-
-
-class GQRS(object):
- def putann(self, annotation):
- self.annotations.append(copy.deepcopy(annotation))
-
- def detect(self, x, conf, adczero):
- self.c = conf
- self.annotations = []
- self.sample_valid = False
-
- if len(x) < 1:
- return []
-
- self.x = x
- self.adczero = adczero
-
- self.qfv = numpy.zeros((self.c._BUFLN), dtype="int64")
- self.smv = numpy.zeros((self.c._BUFLN), dtype="int64")
- self.v1 = 0
-
- t0 = 0
- self.tf = len(x) - 1
- self.t = 0 - self.c.dt4
-
- self.annot = Annotation(0, "NOTE", 0, 0)
-
- # Cicular buffer of Peaks
- first_peak = Peak(0, 0, 0)
- tmp = first_peak
- for _ in range(1, self.c._NPEAKS):
- tmp.next_peak = Peak(0, 0, 0)
- tmp.next_peak.prev_peak = tmp
- tmp = tmp.next_peak
- tmp.next_peak = first_peak
- first_peak.prev_peak = tmp
- self.current_peak = first_peak
-
- if self.c.spm > self.c._BUFLN:
- if self.tf - t0 > self.c._BUFLN:
- tf_learn = t0 + self.c._BUFLN - self.c.dt4
- else:
- tf_learn = self.tf - self.c.dt4
- else:
- if self.tf - t0 > self.c.spm:
- tf_learn = t0 + self.c.spm - self.c.dt4
- else:
- tf_learn = self.tf - self.c.dt4
-
- self.countdown = -1
- self.state = "LEARNING"
- self.gqrs(t0, tf_learn)
-
- self.rewind_gqrs()
-
- self.state = "RUNNING"
- self.t = t0 - self.c.dt4
- self.gqrs(t0, self.tf)
-
- return self.annotations
-
- def rewind_gqrs(self):
- self.countdown = -1
- self.at(self.t)
- self.annot.time = 0
- self.annot.type = "NORMAL"
- self.annot.subtype = 0
- self.annot.num = 0
- p = self.current_peak
- for _ in range(self.c._NPEAKS):
- p.time = 0
- p.type = 0
- p.amp = 0
- p = p.next_peak
-
- def at(self, t):
- if t < 0:
- self.sample_valid = True
- return self.x[0]
- if t > len(self.x) - 1:
- self.sample_valid = False
- return self.x[-1]
- self.sample_valid = True
- return self.x[t]
-
- def smv_at(self, t):
- return self.smv[t & (self.c._BUFLN - 1)]
-
- def smv_put(self, t, v):
- self.smv[t & (self.c._BUFLN - 1)] = v
-
- def qfv_at(self, t):
- return self.qfv[t & (self.c._BUFLN - 1)]
-
- def qfv_put(self, t, v):
- self.qfv[t & (self.c._BUFLN - 1)] = v
-
- def sm(self, at_t): # CHECKED!
- # implements a trapezoidal low pass (smoothing) filter
- # (with a gain of 4*smdt) applied to input signal sig
- # before the QRS matched filter qf().
- # Before attempting to 'rewind' by more than BUFLN-smdt
- # samples, reset smt and smt0.
- smt = self.c.smt
- smdt = int(self.c.smdt)
-
- v = 0
- while at_t > smt:
- smt += 1
- if smt > int(self.c.smt0):
- tmp = int(self.smv_at(smt - 1) + \
- self.at(smt + smdt) + self.at(smt + smdt - 1) - \
- self.at(smt - smdt) - self.at(smt - smdt - 1))
- self.smv_put(smt, tmp)
- else:
- v = int(self.at(smt))
- for j in range(1, smdt):
- smtpj = self.at(smt + j)
- smtlj = self.at(smt - j)
- v += int(smtpj + smtlj)
- self.smv_put(smt, (v << 1) + self.at(smt + j+1) + self.at(smt - j-1) - \
- self.adczero * (smdt << 2))
- self.c.smt = smt
- return self.smv_at(at_t)
-
- def qf(self): # CHECKED!
- # evaluate the QRS detector filter for the next sample
-
- # do this first, to ensure that all of the other smoothed values needed below are in the buffer
- dv2 = self.sm(self.t + self.c.dt4)
- dv2 -= self.smv_at(self.t - self.c.dt4)
- dv1 = int(self.smv_at(self.t + self.c.dt) - self.smv_at(self.t - self.c.dt))
- dv = dv1 << 1
- dv -= int(self.smv_at(self.t + self.c.dt2) - self.smv_at(self.t - self.c.dt2))
- dv = dv << 1
- dv += dv1
- dv -= int(self.smv_at(self.t + self.c.dt3) - self.smv_at(self.t - self.c.dt3))
- dv = dv << 1
- dv += dv2
- self.v1 += dv
- v0 = int(self.v1 / self.c.v1norm)
- self.qfv_put(self.t, v0 * v0)
-
- def gqrs(self, from_sample, to_sample):
- q0 = None
- q1 = 0
- q2 = 0
- rr = None
- rrd = None
- rt = None
- rtd = None
- rtdmin = None
-
- p = None # (Peak)
- q = None # (Peak)
- r = None # (Peak)
- tw = None # (Peak)
-
- last_peak = from_sample
- last_qrs = from_sample
-
- def add_peak(peak_time, peak_amp, type):
- p = self.current_peak.next_peak
- p.time = peak_time
- p.amp = peak_amp
- p.type = type
- self.current_peak = p
- p.next_peak.amp = 0
-
- def peaktype(p):
- # peaktype() returns 1 if p is the most prominent peak in its neighborhood, 2
- # otherwise. The neighborhood consists of all other peaks within rrmin.
- # Normally, "most prominent" is equivalent to "largest in amplitude", but this
- # is not always true. For example, consider three consecutive peaks a, b, c
- # such that a and b share a neighborhood, b and c share a neighborhood, but a
- # and c do not; and suppose that amp(a) > amp(b) > amp(c). In this case, if
- # there are no other peaks, a is the most prominent peak in the (a, b)
- # neighborhood. Since b is thus identified as a non-prominent peak, c becomes
- # the most prominent peak in the (b, c) neighborhood. This is necessary to
- # permit detection of low-amplitude beats that closely precede or follow beats
- # with large secondary peaks (as, for example, in R-on-T PVCs).
- if p.type:
- return p.type
- else:
- a = p.amp
- t0 = p.time - self.c.rrmin
- t1 = p.time + self.c.rrmin
-
- if t0 < 0:
- t0 = 0
-
- pp = p.prev_peak
- while t0 < pp.time and pp.time < pp.next_peak.time:
- if pp.amp == 0:
- break
- if a < pp.amp and peaktype(pp) == 1:
- p.type = 2
- return p.type
- # end:
- pp = pp.prev_peak
-
- pp = p.next_peak
- while pp.time < t1 and pp.time > pp.prev_peak.time:
- if pp.amp == 0:
- break
- if a < pp.amp and peaktype(pp) == 1:
- p.type = 2
- return p.type
- # end:
- pp = pp.next_peak
-
- p.type = 1
- return p.type
-
- def find_missing(r, p):
- if r is None or p is None:
- return None
-
- minrrerr = p.time - r.time
-
- s = None
- q = r.next_peak
- while q.time < p.time:
- if peaktype(q) == 1:
- rrtmp = q.time - r.time
- rrerr = rrtmp - self.c.rrmean
- if rrerr < 0:
- rrerr = -rrerr
- if rrerr < minrrerr:
- minrrerr = rrerr
- s = q
- # end:
- q = q.next_peak
-
- return s
-
- r = None
- next_minute = 0
- minutes = 0
- while self.t <= to_sample + self.c.sps:
- if self.countdown < 0:
- if self.sample_valid:
- self.qf()
- else:
- self.countdown = int(time_to_sample_number(1, self.c.freq))
- self.state = "CLEANUP"
- else:
- self.countdown -= 1
- if self.countdown < 0:
- break
-
- q0 = self.qfv_at(self.t)
- q1 = self.qfv_at(self.t - 1)
- q2 = self.qfv_at(self.t - 2)
-
- # state == RUNNING only
- if q1 > self.c.pthr and q2 < q1 and q1 >= q0 and self.t > self.c.dt4:
- add_peak(self.t - 1, q1, 0)
- last_peak = self.t - 1
- p = self.current_peak.next_peak
- while p.time < self.t - self.c.rtmax:
- if p.time >= self.annot.time + self.c.rrmin and peaktype(p) == 1:
- if p.amp > self.c.qthr:
- rr = p.time - self.annot.time
- q = find_missing(r, p)
- if rr > self.c.rrmean + 2 * self.c.rrdev and \
- rr > 2 * (self.c.rrmean - self.c.rrdev) and \
- q is not None:
- p = q
- rr = p.time - self.annot.time
- self.annot.subtype = 1
- rrd = rr - self.c.rrmean
- if rrd < 0:
- rrd = -rrd
- self.c.rrdev += (rrd - self.c.rrdev) >> 3
- if rrd > self.c.rrinc:
- rrd = self.c.rrinc
- if rr > self.c.rrmean:
- self.c.rrmean += rrd
- else:
- self.c.rrmean -= rrd
- if p.amp > self.c.qthr * 4:
- self.c.qthr += 1
- elif p.amp < self.c.qthr:
- self.c.qthr -= 1
- if self.c.qthr > self.c.pthr * 20:
- self.c.qthr = self.c.pthr * 20
- last_qrs = p.time
-
- if self.state == "RUNNING":
- self.annot.time = p.time - self.c.dt2
- self.annot.type = "NORMAL"
- qsize = int(p.amp * 10.0 / self.c.qthr)
- if qsize > 127:
- qsize = 127
- self.annot.num = qsize
- self.putann(self.annot)
- self.annot.time += self.c.dt2
-
- # look for this beat's T-wave
- tw = None
- rtdmin = self.c.rtmean
- q = p.next_peak
- while q.time > self.annot.time:
- rt = q.time - self.annot.time - self.c.dt2
- if rt < self.c.rtmin:
- # end:
- q = q.next_peak
- continue
- if rt > self.c.rtmax:
- break
- rtd = rt - self.c.rtmean
- if rtd < 0:
- rtd = -rtd
- if rtd < rtdmin:
- rtdmin = rtd
- tw = q
- # end:
- q = q.next_peak
- if tw is not None:
- tmp_time = tw.time - self.c.dt2
- tann = Annotation(tmp_time, "TWAVE",
- 1 if tmp_time > self.annot.time + self.c.rtmean else 0, rtdmin)
- # if self.state == "RUNNING":
- # self.putann(tann)
- rt = tann.time - self.annot.time
- self.c.rtmean += (rt - self.c.rtmean) >> 4
- if self.c.rtmean > self.c.rtmax:
- self.c.rtmean = self.c.rtmax
- elif self.c.rtmean < self.c.rtmin:
- self.c.rtmean = self.c.rrmin
- tw.type = 2 # mark T-wave as secondary
- r = p
- q = None
- self.annot.subtype = 0
- elif self.t - last_qrs > self.c.rrmax and self.c.qthr > self.c.qthmin:
- self.c.qthr -= (self.c.qthr >> 4)
- # end:
- p = p.next_peak
- elif self.t - last_peak > self.c.rrmax and self.c.pthr > self.c.pthmin:
- self.c.pthr -= (self.c.pthr >> 4)
-
- self.t += 1
- if self.t >= next_minute:
- next_minute += self.c.spm
- minutes += 1
- if minutes >= 60:
- minutes = 0
-
- if self.state == "LEARNING":
- return
-
- # Mark the last beat or two.
- p = self.current_peak.next_peak
- while p.time < p.next_peak.time:
- if p.time >= self.annot.time + self.c.rrmin and p.time < self.tf and peaktype(p) == 1:
- self.annot.type = "NORMAL"
- self.annot.time = p.time
- self.putann(self.annot)
- # end:
- p = p.next_peak
-
-
-def gqrs_detect(x, fs, adcgain, adczero, threshold=1.0,
- hr=75, RRdelta=0.2, RRmin=0.28, RRmax=2.4,
- QS=0.07, QT=0.35, RTmin=0.25, RTmax=0.33,
- QRSa=750, QRSamin=130):
- """
- Detect qrs locations in a single channel ecg.
-
- Functionally, a direct port of the gqrs algorithm from the original
- wfdb package. Therefore written to accept wfdb record fields.
-
- Input arguments:
- - x (required): The digital signal as a numpy array
- - fs (required): The sampling frequency of the signal
- - adcgain: The gain of the signal (the number of adus (q.v.) per physical unit)
- - adczero (required): The value produced by the ADC given a 0 volt input.
- - threshold (default=1.0): The threshold for detection
- - hr (default=75): Typical heart rate, in beats per minute
- - RRdelta (default=0.2): Typical difference between successive RR intervals in seconds
- - RRmin (default=0.28): Minimum RR interval ("refractory period"), in seconds
- - RRmax (default=2.4): Maximum RR interval, in seconds; thresholds will be adjusted
- if no peaks are detected within this interval
- - QS (default=0.07): Typical QRS duration, in seconds
- - QT (default=0.35): Typical QT interval, in seconds
- - RTmin (default=0.25): Minimum interval between R and T peaks, in seconds
- - RTmax (default=0.33): Maximum interval between R and T peaks, in seconds
- - QRSa (default=750): Typical QRS peak-to-peak amplitude, in microvolts
- - QRSamin (default=130): Minimum QRS peak-to-peak amplitude, in microvolts
-
- Note: This function should not be used for signals with fs <= 50Hz
- """
- conf = Conf(freq=fs, gain=adcgain, hr=hr,
- RRdelta=RRdelta, RRmin=RRmin, RRmax=RRmax,
- QS=QS, QT=QT,
- RTmin=RTmin, RTmax=RTmax,
- QRSa=QRSa, QRSamin=QRSamin,
- thresh=threshold)
- gqrs = GQRS()
- annotations = gqrs.detect(x=x, conf=conf, adczero=adczero)
- return [a.time for a in annotations]
diff --git a/wfdb/processing/hr.py b/wfdb/processing/hr.py
index f07e30b5..5a62409d 100644
--- a/wfdb/processing/hr.py
+++ b/wfdb/processing/hr.py
@@ -1,37 +1,42 @@
import numpy as np
-def compute_hr(siglen, peak_indices, fs):
+def compute_hr(sig_len, qrs_inds, fs):
"""
Compute instantaneous heart rate from peak indices.
- Usage:
- heart_rate = compute_hr(siglen, peak_indices, fs)
+ Parameters
+ ----------
+ sig_len : int
+ The length of the corresponding signal
+ qrs_inds : numpy array
+ The qrs index locations
+ fs : int, or float
+ The corresponding signal's sampling frequency.
+
+ Returns
+ -------
+ heart_rate : numpy array
+ An array of the instantaneous heart rate, with the length of the
+ corresponding signal. Contains numpy.nan where heart rate could
+ not be computed.
- Input argumnets:
- - siglen (required): The length of the corresponding signal
- - peak_indices (required): The peak indices.
- - fs (required): The corresponding signal's sampling frequency.
-
- Output arguments:
- - heart_rate: A numpy array of the instantaneous heart rate, with the length
- of the corresponding signal. Contains numpy.nan where heart rate could not be computed.
"""
- heart_rate = np.full(siglen, np.nan, dtype='float32')
+ heart_rate = np.full(sig_len, np.nan, dtype='float32')
- if len(peak_indices) < 2:
+ if len(qrs_inds) < 2:
return heart_rate
current_hr = np.nan
- for i in range(0, len(peak_indices)-2):
- a = peak_indices[i]
- b = peak_indices[i+1]
- c = peak_indices[i+2]
+ for i in range(0, len(qrs_inds)-2):
+ a = qrs_inds[i]
+ b = qrs_inds[i+1]
+ c = qrs_inds[i+2]
RR = (b-a) * (1.0 / fs) * 1000
hr = 60000.0 / RR
heart_rate[b+1:c+1] = hr
- heart_rate[peak_indices[-1]:] = heart_rate[peak_indices[-1]]
+ heart_rate[qrs_inds[-1]:] = heart_rate[qrs_inds[-1]]
return heart_rate
diff --git a/wfdb/processing/peaks.py b/wfdb/processing/peaks.py
index bcf95548..54668e0c 100644
--- a/wfdb/processing/peaks.py
+++ b/wfdb/processing/peaks.py
@@ -1,109 +1,218 @@
import copy
-import numpy
-from .gqrs import time_to_sample_number, Conf, Peak, Annotation
-from .basic import smooth
-
-def find_peaks(x):
- # Definitions:
- # * Hard peak: a peak that is either /\ or \/
- # * Soft peak: a peak that is either /-*\ or \-*/ (In that cas we define the middle of it as the peak)
+import numpy as np
- # Returns two numpy arrays:
- # * hard_peaks contains the indices of the Hard peaks
- # * soft_peaks contains the indices of the Soft peaks
+from .basic import smooth
- if len(x) == 0:
- return numpy.empty([0]), numpy.empty([0])
- tmp = x[1:]
- tmp = numpy.append(tmp, [x[-1]])
- tmp = x-tmp
- tmp[numpy.where(tmp>0)] = +1
- tmp[numpy.where(tmp==0)] = 0
- tmp[numpy.where(tmp<0)] = -1
+def find_peaks(sig):
+ """
+ Find hard peaks and soft peaks in a signal, defined as follows:
+ - Hard peak: a peak that is either /\ or \/
+ - Soft peak: a peak that is either /-*\ or \-*/
+ In this case we define the middle as the peak
+
+ Parameters
+ ----------
+ sig : np array
+ The 1d signal array
+
+ Returns
+ -------
+ hard_peaks : numpy array
+        Array containing the indices of the hard peaks.
+ soft_peaks : numpy array
+ Array containing the indices of the soft peaks
+
+ """
+ if len(sig) == 0:
+ return np.empty([0]), np.empty([0])
+
+ tmp = sig[1:]
+ tmp = np.append(tmp, [sig[-1]])
+ tmp = sig - tmp
+ tmp[np.where(tmp>0)] = 1
+ tmp[np.where(tmp==0)] = 0
+ tmp[np.where(tmp<0)] = -1
tmp2 = tmp[1:]
- tmp2 = numpy.append(tmp2, [0])
+ tmp2 = np.append(tmp2, [0])
tmp = tmp-tmp2
- hard_peaks = numpy.where(numpy.logical_or(tmp==-2,tmp==+2))[0]+1
+
+ hard_peaks = np.where(np.logical_or(tmp==-2, tmp==+2))[0] + 1
soft_peaks = []
- for iv in numpy.where(numpy.logical_or(tmp==-1,tmp==+1))[0]:
+
+ for iv in np.where(np.logical_or(tmp==-1,tmp==+1))[0]:
t = tmp[iv]
i = iv+1
while True:
if i==len(tmp) or tmp[i] == -t or tmp[i] == -2 or tmp[i] == 2:
break
if tmp[i] == t:
- soft_peaks.append(int(iv+(i-iv)/2))
+ soft_peaks.append(int(iv + (i - iv)/2))
break
i += 1
- soft_peaks = numpy.asarray(soft_peaks)+1
+ soft_peaks = np.array(soft_peaks, dtype='int') + 1
+
return hard_peaks, soft_peaks
-def correct_peaks(x, peak_indices, min_gap, max_gap, smooth_window):
- N = x.shape[0]
+def find_local_peaks(sig, radius):
+ """
+ Find all local peaks in a signal. A sample is a local peak if it is
+ the largest value within the samples on its left and right.
+
+ In cases where it shares the max value with nearby samples, the
+ middle sample is classified as the local peak.
+
+ Parameters
+ ----------
+ sig : numpy array
+ 1d numpy array of the signal.
+ radius : int
+ The radius in which to search for defining local maxima.
+
+ """
+ # TODO: Fix flat mountain scenarios.
+ peak_inds = []
+
+ i = 0
+ while i < radius + 1:
+ if sig[i] == max(sig[:i + radius]):
+ peak_inds.append(i)
+ i += radius
+ else:
+ i += 1
+
+ while i < len(sig):
+ if sig[i] == max(sig[i - radius:i + radius]):
+ peak_inds.append(i)
+ i += radius
+ else:
+ i += 1
+
+ while i < len(sig):
+ if sig[i] == max(sig[i - radius:]):
+ peak_inds.append(i)
+ i += radius
+ else:
+ i += 1
+
+ return (np.array(peak_inds))
+
+
+def correct_peaks(sig, peak_inds, search_radius, smooth_window_size,
+ peak_dir='compare'):
+ """
+    Adjust a set of detected peaks to coincide with local signal maxima
+    or minima within a given search radius.
+
+ Parameters
+ ----------
+ sig : numpy array
+ The 1d signal array
+    peak_inds : numpy array
+        Array of the original peak indices
+    search_radius : int
+        The radius within which the original peaks may be shifted.
+ smooth_window_size : int
+ The window size of the moving average filter applied on the
+ signal. Peak distance is calculated on the difference between
+ the original and smoothed signal.
+ peak_dir : str, optional
+ The expected peak direction: 'up' or 'down', 'both', or
+ 'compare'.
+ - If 'up', the peaks will be shifted to local maxima
+ - If 'down', the peaks will be shifted to local minima
+ - If 'both', the peaks will be shifted to local maxima of the
+ rectified signal
+ - If 'compare', the function will try both 'up' and 'down'
+ options, and choose the direction that gives the largest mean
+ distance from the smoothed signal.
+
+ Returns
+ -------
+ corrected_peak_inds : numpy array
+ Array of the corrected peak indices
+
+
+ Examples
+ --------
+
+ """
+ sig_len = sig.shape[0]
+ n_peaks = len(peak_inds)
+
+ # Subtract the smoothed signal from the original
+ sig = sig - smooth(sig=sig, window_size=smooth_window_size)
+
+
+ # Shift peaks to local maxima
+ if peak_dir == 'up':
+ shifted_peak_inds = shift_peaks(sig=sig,
+ peak_inds=peak_inds,
+ search_radius=search_radius,
+ peak_up=True)
+ elif peak_dir == 'down':
+ shifted_peak_inds = shift_peaks(sig=sig,
+ peak_inds=peak_inds,
+ search_radius=search_radius,
+ peak_up=False)
+ elif peak_dir == 'both':
+ shifted_peak_inds = shift_peaks(sig=np.abs(sig),
+ peak_inds=peak_inds,
+ search_radius=search_radius,
+ peak_up=True)
+ else:
+ shifted_peak_inds_up = shift_peaks(sig=sig,
+ peak_inds=peak_inds,
+ search_radius=search_radius,
+ peak_up=True)
+ shifted_peak_inds_down = shift_peaks(sig=sig,
+ peak_inds=peak_inds,
+ search_radius=search_radius,
+ peak_up=False)
+
+ # Choose the direction with the biggest deviation
+ up_dist = np.mean(np.abs(sig[shifted_peak_inds_up]))
+ down_dist = np.mean(np.abs(sig[shifted_peak_inds_down]))
+
+ if up_dist >= down_dist:
+ shifted_peak_inds = shifted_peak_inds_up
+ else:
+ shifted_peak_inds = shifted_peak_inds_down
- rpeaks = numpy.zeros(N)
- rpeaks[peak_indices] = 1.0
+ return shifted_peak_inds
- rpeaks = rpeaks.astype('int32')
- # 1- Extract ranges where we have one or many ones side by side
- rpeaks_ranges = []
- tmp_idx = 0
- for i in range(1, len(rpeaks)):
- if rpeaks[i-1] == 1:
- if rpeaks[i] == 0:
- rpeaks_ranges.append((tmp_idx, i-1))
+def shift_peaks(sig, peak_inds, search_radius, peak_up):
+ """
+ Helper function for correct_peaks. Return the shifted peaks to local
+ maxima or minima within a radius.
+
+ peak_up : bool
+ Whether the expected peak direction is up
+ """
+ sig_len = sig.shape[0]
+ n_peaks = len(peak_inds)
+ # The indices to shift each peak ind by
+ shift_inds = np.zeros(n_peaks, dtype='int')
+
+ # Iterate through peaks
+ for i in range(n_peaks):
+ ind = peak_inds[i]
+ local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)]
+
+ if peak_up:
+ shift_inds[i] = np.argmax(local_sig)
else:
- if rpeaks[i] == 1:
- tmp_idx = i
-
- smoothed = smooth(x, smooth_window)
-
- # Compute signal's peaks
- hard_peaks, soft_peaks = find_peaks(x=x)
- all_peak_idxs = numpy.concatenate((hard_peaks, soft_peaks)).astype('int64')
-
- # Replace each range of ones by the index of the best value in it
- tmp = set()
- for rp_range in rpeaks_ranges:
- r = numpy.arange(rp_range[0], rp_range[1]+1, dtype='int64')
- vals = x[r]
- smoothed_vals = smoothed[r]
- p = r[numpy.argmax(numpy.absolute(numpy.asarray(vals)-smoothed_vals))]
- tmp.add(p)
-
- # Replace all peaks by the peak within x-max_gap < x < x+max_gap which have the bigget distance from smooth curve
- dist = numpy.absolute(x-smoothed) # Peak distance from the smoothed mean
- rpeak_indices = set()
- for p in tmp:
- a = max(0, p-max_gap)
- b = min(N, p+max_gap)
- r = numpy.arange(a, b, dtype='int64')
- idx_best = r[numpy.argmax(dist[r])]
- rpeak_indices.add(idx_best)
-
- rpeak_indices = list(rpeak_indices)
-
- # Prevent multiple peaks to appear in the max bpm range (max_gap)
- # If we found more than one peak in this interval, then we choose the peak with the maximum amplitude compared to the mean of the signal
- tmp = numpy.asarray(rpeak_indices)
- to_remove = {}
- for idx in rpeak_indices:
- if idx in to_remove:
- continue
- r = tmp[numpy.where(numpy.absolute(tmp-idx)<=max_gap)[0]]
- if len(r) == 1:
- continue
- rr = r.astype('int64')
- vals = x[rr]
- smoo = smoothed[rr]
- the_one = r[numpy.argmax(numpy.absolute(vals-smoo))]
- for i in r:
- if i != the_one:
- to_remove[i] = True
- for v, _ in to_remove.items():
- rpeak_indices.remove(v)
-
- return sorted(rpeak_indices)
+ shift_inds[i] = np.argmin(local_sig)
+
+ # May have to adjust early values
+ for i in range(n_peaks):
+ ind = peak_inds[i]
+ if ind >= search_radius:
+ break
+ shift_inds[i] -= search_radius - ind
+
+ shifted_peak_inds = peak_inds + shift_inds - search_radius
+
+ return shifted_peak_inds
diff --git a/wfdb/processing/qrs.py b/wfdb/processing/qrs.py
new file mode 100644
index 00000000..8c892f3e
--- /dev/null
+++ b/wfdb/processing/qrs.py
@@ -0,0 +1,1242 @@
+import copy
+import numpy as np
+from scipy import signal
+from sklearn.cluster import KMeans
+from sklearn.preprocessing import normalize
+
+from .basic import get_filter_gain
+from .peaks import find_local_peaks
+from ..io.record import Record
+
+
+class XQRS(object):
+ """
+ The qrs detector class for the xqrs algorithm.
+
+ The `XQRS.Conf` class is the configuration class that stores initial
+ parameters for the detection.
+
+ The `XQRS.detect` method runs the detection algorithm.
+
+ The process works as follows:
+ - Load the signal and configuration parameters.
+ - Bandpass filter the signal between 5 and 20 Hz, to get the
+ filtered signal.
+ - Apply moving wave integration (mwi) with a ricker
+ (Mexican hat) wavelet onto the filtered signal, and save the
+ square of the integrated signal.
+ - Conduct learning if specified, to initialize running
+ parameters of noise and qrs amplitudes, the qrs detection
+ threshold, and recent rr intervals. If learning is unspecified
+ or fails, use default parameters.
+ - Run the main detection. Iterate through the local maxima of
+ the mwi signal. For each local maxima:
+ - Check if it is a qrs complex. To be classified as a qrs,
+ it must come after the refractory period, cross the qrs
+ detection threshold, and not be classified as a t-wave
+ if it comes close enough to the previous qrs. If
+ successfully classified, update running detection
+ threshold and heart rate parameters.
+ - If not a qrs, classify it as a noise peak and update
+ running parameters.
+ - Before continuing to the next local maxima, if no qrs
+ was detected within 1.66 times the recent rr interval,
+ perform backsearch qrs detection. This checks previous
+ peaks using a lower qrs detection threshold.
+
+ Examples
+ --------
+ >>> import wfdb
+ >>> from wfdb import processing
+
+ >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
+ >>> xqrs = processing.XQRS(sig=sig[:,0], fs=fields['fs'])
+ >>> xqrs.detect()
+
+ >>> wfdb.plot_items(signal=sig, ann_samp=[xqrs.qrs_inds])
+
+ """
+
+ def __init__(self, sig, fs, conf=None):
+ if sig.ndim != 1:
+ raise ValueError('sig must be a 1d numpy array')
+ self.sig = sig
+ self.fs = fs
+ self.sig_len = len(sig)
+ self.conf = conf or XQRS.Conf()
+ self._set_conf()
+
+ class Conf(object):
+ """
+ Initial signal configuration object for this qrs detector
+ """
+ def __init__(self, hr_init=75, hr_max=200, hr_min=25, qrs_width=0.1,
+ qrs_thr_init=0.13, qrs_thr_min=0, ref_period=0.2,
+ t_inspect_period=0.36):
+ """
+ Parameters
+ ----------
+ hr_init : int or float, optional
+                Initial heart rate in beats per minute. Used for
+                initializing the recent rr interval.
+ hr_max : int or float, optional
+ Hard maximum heart rate between two beats, in beats per
+ minute. Used for refractory period.
+ hr_min : int or float, optional
+ Hard minimum heart rate between two beats, in beats per
+ minute. Used for calculating recent rr intervals.
+ qrs_width : int or float, optional
+ Expected qrs width in seconds. Used for filter widths
+                and the indirect refractory period.
+ qrs_thr_init : int or float, optional
+ Initial qrs detection threshold in mV. Use when learning
+ is False, or learning fails.
+ qrs_thr_min : int or float or string, optional
+ Hard minimum detection threshold of qrs wave. Leave as 0
+ for no minimum.
+ ref_period : int or float, optional
+                The qrs refractory period, in seconds.
+ t_inspect_period : int or float, optional
+ The period below which a potential qrs complex is
+ inspected to see if it is a t wave.
+
+ """
+ if hr_min < 0:
+                raise ValueError("'hr_min' must be >= 0")
+
+ if not hr_min < hr_init < hr_max:
+ raise ValueError("'hr_min' < 'hr_init' < 'hr_max' must be True")
+
+ if qrs_thr_init < qrs_thr_min:
+ raise ValueError("qrs_thr_min must be <= qrs_thr_init")
+
+ self.hr_init = hr_init
+ self.hr_max = hr_max
+ self.hr_min = hr_min
+ self.qrs_width = qrs_width
+ self.qrs_radius = self.qrs_width / 2
+ self.qrs_thr_init = qrs_thr_init
+ self.qrs_thr_min = qrs_thr_min
+ self.ref_period = ref_period
+ self.t_inspect_period = t_inspect_period
+
+ def _set_conf(self):
+ """
+ Set configuration parameters from the Conf object into the detector
+ object.
+
+ Time values are converted to samples, and amplitude values are in mV.
+ """
+ self.rr_init = 60 * self.fs / self.conf.hr_init
+ self.rr_max = 60 * self.fs / self.conf.hr_min
+ self.rr_min = 60 * self.fs / self.conf.hr_max
+
+ self.qrs_width = int(self.conf.qrs_width * self.fs)
+ self.qrs_radius = int(self.conf.qrs_radius * self.fs)
+
+ self.qrs_thr_init = self.conf.qrs_thr_init
+ self.qrs_thr_min = self.conf.qrs_thr_min
+
+ self.ref_period = int(self.conf.ref_period * self.fs)
+ self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)
+
+
+ def _bandpass(self, fc_low=5, fc_high=20):
+ """
+ Apply a bandpass filter onto the signal, and save the filtered signal.
+ """
+ self.fc_low = fc_low
+ self.fc_high = fc_high
+
+ b, a = signal.butter(2, [float(fc_low) * 2 / self.fs,
+ float(fc_high) * 2 / self.fs], 'pass')
+ self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto],
+ axis=0)
+ # Save the passband gain (x2 due to double filtering)
+ self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]),
+ self.fs) * 2
+
+
+ def _mwi(self):
+ """
+ Apply moving wave integration with a ricker (Mexican hat) wavelet onto
+ the filtered signal, and save the square of the integrated signal.
+
+ The width of the hat is equal to the qrs width
+
+ Also find all local peaks in the mwi signal.
+ """
+ b = signal.ricker(self.qrs_width, 4)
+ self.sig_i = signal.filtfilt(b, [1], self.sig_f, axis=0) ** 2
+
+ # Save the mwi gain (x2 due to double filtering) and the total gain
+ # from raw to mwi
+ self.mwi_gain = get_filter_gain(b, [1],
+ np.mean([self.fc_low, self.fc_high]), self.fs) * 2
+ self.transform_gain = self.filter_gain * self.mwi_gain
+ self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius)
+ self.n_peaks_i = len(self.peak_inds_i)
+
+ def _learn_init_params(self, n_calib_beats=8):
+ """
+ Find a number of consecutive beats and use them to initialize:
+ - recent qrs amplitude
+ - recent noise amplitude
+ - recent rr interval
+ - qrs detection threshold
+
+ The learning works as follows:
+ - Find all local maxima (largest sample within `qrs_radius`
+ samples) of the filtered signal.
+ - Inspect the local maxima until `n_calib_beats` beats are
+ found:
+ - Calculate the cross-correlation between a ricker wavelet of
+ length `qrs_width`, and the filtered signal segment centered
+ around the local maximum.
+ - If the cross-correlation exceeds 0.6, classify it as a beat.
+ - Use the beats to initialize the previously described
+ parameters.
+ - If the system fails to find enough beats, the default
+ parameters will be used instead. See the docstring of
+      `XQRS._set_default_init_params` for details.
+
+ Parameters
+ ----------
+ n_calib_beats : int, optional
+ Number of calibration beats to detect for learning
+
+
+ """
+ if self.verbose:
+ print('Learning initial signal parameters...')
+
+ # Find the local peaks of the signal.
+ peak_inds_f = find_local_peaks(self.sig_f,
+ int(self.qrs_width / 2))
+
+ last_qrs_ind = -self.rr_max
+ qrs_inds = []
+ qrs_amps = []
+ noise_amps = []
+
+ qrs_radius = int(self.qrs_width / 2)
+ ricker_wavelet = signal.ricker(self.qrs_width, 4).reshape(-1,1)
+
+ # Go through the peaks and find qrs peaks and noise peaks.
+ for peak_num in range(
+ np.where(peak_inds_f > self.qrs_width)[0][0],
+ len(peak_inds_f)):
+
+ i = peak_inds_f[peak_num]
+
+ # Calculate cross-correlation between the filtered signal
+ # segment and a ricker wavelet
+
+ # Question: should the signal be squared? Case for inverse qrs
+ # complexes
+ sig_segment = normalize((self.sig_f[i - qrs_radius:i + qrs_radius]
+ ).reshape(-1, 1), axis=0)
+
+ xcorr = np.correlate(sig_segment[:, 0], ricker_wavelet[:,0])
+
+ # Classify as qrs if xcorr is large enough
+ if xcorr > 0.6 and i-last_qrs_ind > self.rr_min:
+ last_qrs_ind = i
+ qrs_inds.append(i)
+ qrs_amps.append(self.sig_i[i])
+ else:
+ noise_amps.append(self.sig_i[i])
+
+ if len(qrs_inds) == n_calib_beats:
+ break
+
+ # Found enough calibration beats to initialize parameters
+ if len(qrs_inds) == n_calib_beats:
+
+ if self.verbose:
+ print('Found %d beats during learning.' % n_calib_beats
+ + ' Initializing using learned parameters')
+
+ # QRS amplitude is most important.
+ qrs_amp = np.mean(qrs_amps)
+
+ # Set noise amplitude if found
+ if noise_amps:
+ noise_amp = np.mean(noise_amps)
+ else:
+ # Set default of 1/10 of qrs amplitude
+ noise_amp = qrs_amp / 10
+
+ # Get rr intervals of consecutive beats, if any.
+ rr_intervals = np.diff(qrs_inds)
+ rr_intervals = rr_intervals[rr_intervals < self.rr_max]
+ if rr_intervals.any():
+ rr_recent = np.mean(rr_intervals)
+ else:
+ rr_recent = self.rr_init
+
+ # If an early qrs was detected, set last_qrs_ind so that it can be
+ # picked up.
+ last_qrs_ind = min(0, qrs_inds[0] - self.rr_min - 1)
+
+ self._set_init_params(qrs_amp_recent=qrs_amp,
+ noise_amp_recent=noise_amp,
+ rr_recent=rr_recent,
+ last_qrs_ind=last_qrs_ind)
+
+ # Failed to find enough calibration beats. Use default values.
+ else:
+ if self.verbose:
+ print('Failed to find %d beats during learning.'
+ % n_calib_beats)
+
+ self._set_init_params()
+
+
+ def _set_init_params(self, qrs_amp_recent, noise_amp_recent, rr_recent,
+ last_qrs_ind):
+ """
+ Set initial online parameters
+ """
+ self.qrs_amp_recent = qrs_amp_recent
+ self.noise_amp_recent = noise_amp_recent
+ # What happens if qrs_thr is calculated to be less than the explicit
+ # min threshold? Should print warning?
+ self.qrs_thr = max(0.25*self.qrs_amp_recent
+ + 0.75*self.noise_amp_recent,
+ self.qrs_thr_min * self.transform_gain)
+ self.rr_recent = rr_recent
+ self.last_qrs_ind = last_qrs_ind
+
+ # No qrs detected initially
+ self.last_qrs_peak_num = None
+
+
+ def _set_default_init_params(self):
+ """
+ Set initial running parameters using default values.
+
+ The steady state equation is:
+ `qrs_thr = 0.25*qrs_amp + 0.75*noise_amp`
+
+ Estimate that qrs amp is 10x noise amp, giving:
+ `qrs_thr = 0.325 * qrs_amp or 13/40 * qrs_amp`
+
+ """
+ if self.verbose:
+ print('Initializing using default parameters')
+ # Multiply the specified ecg thresholds by the filter and mwi gain
+ # factors
+ qrs_thr_init = self.qrs_thr_init * self.transform_gain
+ qrs_thr_min = self.qrs_thr_min * self.transform_gain
+
+ qrs_amp = 27/40 * qrs_thr_init
+ noise_amp = qrs_amp / 10
+ rr_recent = self.rr_init
+ last_qrs_ind = 0
+
+ self._set_init_params(qrs_amp_recent=qrs_amp,
+ noise_amp_recent=noise_amp,
+ rr_recent=rr_recent,
+ last_qrs_ind=last_qrs_ind)
+
+ def _is_qrs(self, peak_num, backsearch=False):
+ """
+ Check whether a peak is a qrs complex. It is classified as qrs
+ if it:
+ - Comes after the refractory period
+ - Passes qrs threshold
+ - Is not a t-wave (check it if the peak is close to the previous
+ qrs).
+
+ Parameters
+ ----------
+ peak_num : int
+ The peak number of the mwi signal to be inspected
+ backsearch: bool, optional
+ Whether the peak is being inspected during backsearch
+
+ """
+ i = self.peak_inds_i[peak_num]
+ if backsearch:
+ qrs_thr = self.qrs_thr / 2
+ else:
+ qrs_thr = self.qrs_thr
+
+ if (i-self.last_qrs_ind > self.ref_period
+ and self.sig_i[i] > qrs_thr):
+ if i-self.last_qrs_ind < self.t_inspect_period:
+ if self._is_twave(peak_num):
+ return False
+ return True
+
+ return False
+
+
+ def _update_qrs(self, peak_num, backsearch=False):
+ """
+ Update live qrs parameters. Adjust the recent rr-intervals and
+ qrs amplitudes, and the qrs threshold.
+
+ Parameters
+ ----------
+ peak_num : int
+ The peak number of the mwi signal where the qrs is detected
+ backsearch: bool, optional
+ Whether the qrs was found via backsearch
+ """
+
+ i = self.peak_inds_i[peak_num]
+
+ # Update recent rr if the beat is consecutive (do this before
+ # updating self.last_qrs_ind)
+ rr_new = i - self.last_qrs_ind
+ if rr_new < self.rr_max:
+ self.rr_recent = 0.875*self.rr_recent + 0.125*rr_new
+
+ self.qrs_inds.append(i)
+ self.last_qrs_ind = i
+ # Peak number corresponding to last qrs
+ self.last_qrs_peak_num = self.peak_num
+
+ # qrs recent amplitude is adjusted twice as quickly if the peak
+ # was found via backsearch
+ if backsearch:
+ self.backsearch_qrs_inds.append(i)
+ self.qrs_amp_recent = (0.75*self.qrs_amp_recent
+ + 0.25*self.sig_i[i])
+ else:
+ self.qrs_amp_recent = (0.875*self.qrs_amp_recent
+ + 0.125*self.sig_i[i])
+
+ self.qrs_thr = max((0.25*self.qrs_amp_recent
+ + 0.75*self.noise_amp_recent), self.qrs_thr_min)
+
+ return
+
+
+ def _is_twave(self, peak_num):
+ """
+ Check whether a segment is a t-wave. Compare the maximum gradient of
+ the filtered signal segment with that of the previous qrs segment.
+
+ Parameters
+ ----------
+ peak_num : int
+ The peak number of the mwi signal where the qrs is detected
+
+ """
+ i = self.peak_inds_i[peak_num]
+
+ # Due to initialization parameters, last_qrs_ind may be negative.
+ # No way to check in this instance.
+ if self.last_qrs_ind - self.qrs_radius < 0:
+ return False
+
+ # Get half the qrs width of the signal to the left.
+ # Should this be squared?
+ sig_segment = normalize((self.sig_f[i - self.qrs_radius:i]
+ ).reshape(-1, 1), axis=0)
+ last_qrs_segment = self.sig_f[self.last_qrs_ind - self.qrs_radius:
+ self.last_qrs_ind]
+
+ segment_slope = np.diff(sig_segment)
+ last_qrs_slope = np.diff(last_qrs_segment)
+
+ # Should we be using absolute values?
+ if max(segment_slope) < 0.5*max(abs(last_qrs_slope)):
+ return True
+ else:
+ return False
+
+ def _update_noise(self, peak_num):
+ """
+ Update live noise parameters
+ """
+ i = self.peak_inds_i[peak_num]
+ self.noise_amp_recent = (0.875*self.noise_amp_recent
+ + 0.125*self.sig_i[i])
+ return
+
+ def _require_backsearch(self):
+ """
+ Determine whether a backsearch should be performed on prior peaks
+ """
+ if self.peak_num == self.n_peaks_i-1:
+ # If we just return false, we may miss a chance to backsearch.
+ # Update this?
+ return False
+
+ next_peak_ind = self.peak_inds_i[self.peak_num + 1]
+
+ if next_peak_ind-self.last_qrs_ind > self.rr_recent*1.66:
+ return True
+ else:
+ return False
+
+ def _backsearch(self):
+ """
+ Inspect previous peaks from the last detected qrs peak (if any),
+ using a lower threshold
+
+ """
+ if self.last_qrs_peak_num is not None:
+ for peak_num in range(self.last_qrs_peak_num + 1, self.peak_num + 1):
+ if self._is_qrs(peak_num=peak_num, backsearch=True):
+ self._update_qrs(peak_num=peak_num, backsearch=True)
+ # No need to update noise parameters if it was classified as
+ # noise. It would have already been updated.
+
+ def _run_detection(self):
+ """
+ Run the qrs detection after all signals and parameters have been
+ configured and set.
+
+ """
+ if self.verbose:
+ print('Running QRS detection...')
+
+ # Detected qrs indices
+ self.qrs_inds = []
+ # qrs indices found via backsearch
+ self.backsearch_qrs_inds = []
+
+ # Iterate through mwi signal peak indices
+ for self.peak_num in range(self.n_peaks_i):
+ if self._is_qrs(self.peak_num):
+ self._update_qrs(self.peak_num)
+ else:
+ self._update_noise(self.peak_num)
+
+ # Before continuing to the next peak, do backsearch if
+ # necessary
+ if self._require_backsearch():
+ self._backsearch()
+
+ # Detected indices are relative to starting sample
+ if self.qrs_inds:
+ self.qrs_inds = np.array(self.qrs_inds) + self.sampfrom
+ else:
+ self.qrs_inds = np.array(self.qrs_inds)
+
+ if self.verbose:
+ print('QRS detection complete.')
+
+
+ def detect(self, sampfrom=0, sampto='end', learn=True, verbose=True):
+ """
+ Detect qrs locations between two samples.
+
+ Parameters
+ ----------
+ sampfrom : int, optional
+ The starting sample number to run the detection on.
+ sampto : int, optional
+ The final sample number to run the detection on. Set as
+ 'end' to run on the entire signal.
+ learn : bool, optional
+ Whether to apply learning on the signal before running the
+ main detection. If learning fails or is not conducted, the
+ default configuration parameters will be used to initialize
+ these variables. See the `XQRS._learn_init_params` docstring
+ for details.
+ verbose : bool, optional
+ Whether to display the stages and outcomes of the detection
+ process.
+
+ """
+ if sampfrom < 0:
+ raise ValueError("'sampfrom' cannot be negative")
+ self.sampfrom = sampfrom
+
+ if sampto == 'end':
+ sampto = self.sig_len
+ elif sampto > self.sig_len:
+ raise ValueError("'sampto' cannot exceed the signal length")
+ self.sampto = sampto
+ self.verbose = verbose
+
+ # Get/set signal configuration fields from Conf object
+ self._set_conf()
+ # Bandpass filter the signal
+ self._bandpass()
+ # Compute moving wave integration of filtered signal
+ self._mwi()
+
+ # Initialize the running parameters
+ if learn:
+ self._learn_init_params()
+ else:
+ self._set_default_init_params()
+
+ # Run the detection
+ self._run_detection()
+
+
+def xqrs_detect(sig, fs, sampfrom=0, sampto='end', conf=None,
+ learn=True, verbose=True):
+ """
+ Run the 'xqrs' qrs detection algorithm on a signal. See the
+ docstring of the XQRS class for algorithm details.
+
+ Parameters
+ ----------
+ sig : numpy array
+ The input ecg signal to apply the qrs detection on.
+ fs : int or float
+ The sampling frequency of the input signal.
+ sampfrom : int, optional
+ The starting sample number to run the detection on.
+    sampto : int or 'end', optional
+ The final sample number to run the detection on. Set as 'end' to
+ run on the entire signal.
+ conf : XQRS.Conf object, optional
+ The configuration object specifying signal configuration
+ parameters. See the docstring of the XQRS.Conf class.
+ learn : bool, optional
+ Whether to apply learning on the signal before running the main
+ detection. If learning fails or is not conducted, the default
+ configuration parameters will be used to initialize these
+ variables.
+ verbose : bool, optional
+ Whether to display the stages and outcomes of the detection
+ process.
+
+ Returns
+ -------
+ qrs_inds : numpy array
+ The indices of the detected qrs complexes
+
+ Examples
+ --------
+ >>> import wfdb
+ >>> from wfdb import processing
+
+ >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
+ >>> qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs'])
+
+ """
+ xqrs = XQRS(sig=sig, fs=fs, conf=conf)
+ xqrs.detect(sampfrom=sampfrom, sampto=sampto)
+ return xqrs.qrs_inds
+
+
+def time_to_sample_number(seconds, frequency):
+ return seconds * frequency + 0.5
+
+
+class Conf(object):
+ def __init__(self, fs, adc_gain, hr=75,
+ RRdelta=0.2, RRmin=0.28, RRmax=2.4,
+ QS=0.07, QT=0.35,
+ RTmin=0.25, RTmax=0.33,
+ QRSa=750, QRSamin=130,
+ thresh=1.0):
+ self.fs = fs
+
+ self.sps = int(time_to_sample_number(1, fs))
+ self.spm = int(time_to_sample_number(60, fs))
+
+ self.hr = hr
+ self.RR = 60.0 / self.hr
+ self.RRdelta = RRdelta
+ self.RRmin = RRmin
+ self.RRmax = RRmax
+ self.QS = QS
+ self.QT = QT
+ self.RTmin = RTmin
+ self.RTmax = RTmax
+ self.QRSa = QRSa
+ self.QRSamin = QRSamin
+ self.thresh = thresh
+
+ self._NORMAL = 1 # normal beat
+ self._ARFCT = 16 # isolated QRS-like artifact
+ self._NOTE = 22 # comment annotation
+ self._TWAVE = 27 # T-wave peak
+ self._NPEAKS = 64 # number of peaks buffered (per signal)
+ self._BUFLN = 32768 # must be a power of 2, see qf()
+
+ self.rrmean = int(self.RR * self.sps)
+ self.rrdev = int(self.RRdelta * self.sps)
+ self.rrmin = int(self.RRmin * self.sps)
+ self.rrmax = int(self.RRmax * self.sps)
+
+ self.rrinc = int(self.rrmean / 40)
+ if self.rrinc < 1:
+ self.rrinc = 1
+
+ self.dt = int(self.QS * self.sps / 4)
+ if self.dt < 1:
+ self.dt = 1
+ print("Warning: sampling rate may be too low!")
+
+ self.rtmin = int(self.RTmin * self.sps)
+ self.rtmean = int(0.75 * self.QT * self.sps)
+ self.rtmax = int(self.RTmax * self.sps)
+
+ dv = adc_gain * self.QRSamin * 0.001
+ self.pthr = int((self.thresh * dv * dv) / 6)
+ self.qthr = self.pthr << 1
+ self.pthmin = self.pthr >> 2
+ self.qthmin = int((self.pthmin << 2) / 3)
+ self.tamean = self.qthr # initial value for mean T-wave amplitude
+
+ # Filter constants and thresholds.
+ self.dt2 = 2 * self.dt
+ self.dt3 = 3 * self.dt
+ self.dt4 = 4 * self.dt
+
+ self.smdt = self.dt
+ self.v1norm = self.smdt * self.dt * 64
+
+ self.smt = 0
+ self.smt0 = 0 + self.smdt
+
+
+class Peak(object):
+ def __init__(self, peak_time, peak_amp, peak_type):
+ self.time = peak_time
+ self.amp = peak_amp
+ self.type = peak_type
+ self.next_peak = None
+ self.prev_peak = None
+
+
+class Annotation(object):
+ def __init__(self, ann_time, ann_type, ann_subtype, ann_num):
+ self.time = ann_time
+ self.type = ann_type
+ self.subtype = ann_subtype
+ self.num = ann_num
+
+
+class GQRS(object):
+ def putann(self, annotation):
+ self.annotations.append(copy.deepcopy(annotation))
+
+ def detect(self, x, conf, adc_zero):
+ """
+ Run detection. x is digital signal
+ """
+ self.c = conf
+ self.annotations = []
+ self.sample_valid = False
+
+ if len(x) < 1:
+ return []
+
+ self.x = x
+ self.adc_zero = adc_zero
+
+ self.qfv = np.zeros((self.c._BUFLN), dtype="int64")
+ self.smv = np.zeros((self.c._BUFLN), dtype="int64")
+ self.v1 = 0
+
+ t0 = 0
+ self.tf = len(x) - 1
+ self.t = 0 - self.c.dt4
+
+ self.annot = Annotation(0, "NOTE", 0, 0)
+
+        # Circular buffer of Peaks
+ first_peak = Peak(0, 0, 0)
+ tmp = first_peak
+ for _ in range(1, self.c._NPEAKS):
+ tmp.next_peak = Peak(0, 0, 0)
+ tmp.next_peak.prev_peak = tmp
+ tmp = tmp.next_peak
+ tmp.next_peak = first_peak
+ first_peak.prev_peak = tmp
+ self.current_peak = first_peak
+
+ if self.c.spm > self.c._BUFLN:
+ if self.tf - t0 > self.c._BUFLN:
+ tf_learn = t0 + self.c._BUFLN - self.c.dt4
+ else:
+ tf_learn = self.tf - self.c.dt4
+ else:
+ if self.tf - t0 > self.c.spm:
+ tf_learn = t0 + self.c.spm - self.c.dt4
+ else:
+ tf_learn = self.tf - self.c.dt4
+
+ self.countdown = -1
+ self.state = "LEARNING"
+ self.gqrs(t0, tf_learn)
+
+ self.rewind_gqrs()
+
+ self.state = "RUNNING"
+ self.t = t0 - self.c.dt4
+ self.gqrs(t0, self.tf)
+
+ return self.annotations
+
+ def rewind_gqrs(self):
+ self.countdown = -1
+ self.at(self.t)
+ self.annot.time = 0
+ self.annot.type = "NORMAL"
+ self.annot.subtype = 0
+ self.annot.num = 0
+ p = self.current_peak
+ for _ in range(self.c._NPEAKS):
+ p.time = 0
+ p.type = 0
+ p.amp = 0
+ p = p.next_peak
+
+ def at(self, t):
+ if t < 0:
+ self.sample_valid = True
+ return self.x[0]
+ if t > len(self.x) - 1:
+ self.sample_valid = False
+ return self.x[-1]
+ self.sample_valid = True
+ return self.x[t]
+
+ def smv_at(self, t):
+ return self.smv[t & (self.c._BUFLN - 1)]
+
+ def smv_put(self, t, v):
+ self.smv[t & (self.c._BUFLN - 1)] = v
+
+ def qfv_at(self, t):
+ return self.qfv[t & (self.c._BUFLN - 1)]
+
+ def qfv_put(self, t, v):
+ self.qfv[t & (self.c._BUFLN - 1)] = v
+
+ def sm(self, at_t):
+ # implements a trapezoidal low pass (smoothing) filter
+ # (with a gain of 4*smdt) applied to input signal sig
+ # before the QRS matched filter qf().
+ # Before attempting to 'rewind' by more than BUFLN-smdt
+ # samples, reset smt and smt0.
+
+ # Calculate samp values from self.smt to at_t.
+ smt = self.c.smt
+ smdt = int(self.c.smdt)
+
+ v = 0
+ while at_t > smt:
+ smt += 1
+ # from dt+1 onwards
+ if smt > int(self.c.smt0):
+ tmp = int(self.smv_at(smt - 1) + \
+ self.at(smt + smdt) + self.at(smt + smdt - 1) - \
+ self.at(smt - smdt) - self.at(smt - smdt - 1))
+ self.smv_put(smt, tmp)
+ self.SIG_SMOOTH.append(tmp)
+ # from 1 to dt. 0 is never calculated.
+ else:
+ v = int(self.at(smt))
+ for j in range(1, smdt):
+ smtpj = self.at(smt + j)
+ smtlj = self.at(smt - j)
+ v += int(smtpj + smtlj)
+ self.smv_put(smt, (v << 1) + self.at(smt + j+1) + self.at(smt - j-1) - \
+ self.adc_zero * (smdt << 2))
+
+ self.SIG_SMOOTH.append((v << 1) + self.at(smt + j+1) + self.at(smt - j-1) - \
+ self.adc_zero * (smdt << 2))
+ self.c.smt = smt
+
+ return self.smv_at(at_t)
+
+ def qf(self):
+ # evaluate the QRS detector filter for the next sample
+
+ # do this first, to ensure that all of the other smoothed values needed below are in the buffer
+ dv2 = self.sm(self.t + self.c.dt4)
+ dv2 -= self.smv_at(self.t - self.c.dt4)
+ dv1 = int(self.smv_at(self.t + self.c.dt) - self.smv_at(self.t - self.c.dt))
+ dv = dv1 << 1
+ dv -= int(self.smv_at(self.t + self.c.dt2) - self.smv_at(self.t - self.c.dt2))
+ dv = dv << 1
+ dv += dv1
+ dv -= int(self.smv_at(self.t + self.c.dt3) - self.smv_at(self.t - self.c.dt3))
+ dv = dv << 1
+ dv += dv2
+ self.v1 += dv
+ v0 = int(self.v1 / self.c.v1norm)
+ self.qfv_put(self.t, v0 * v0)
+ self.SIG_QRS.append(v0 ** 2)
+
+ def gqrs(self, from_sample, to_sample):
+ q0 = None
+ q1 = 0
+ q2 = 0
+ rr = None
+ rrd = None
+ rt = None
+ rtd = None
+ rtdmin = None
+
+ p = None # (Peak)
+ q = None # (Peak)
+ r = None # (Peak)
+ tw = None # (Peak)
+
+ last_peak = from_sample
+ last_qrs = from_sample
+
+ self.SIG_SMOOTH = []
+ self.SIG_QRS = []
+
+ def add_peak(peak_time, peak_amp, type):
+ p = self.current_peak.next_peak
+ p.time = peak_time
+ p.amp = peak_amp
+ p.type = type
+ self.current_peak = p
+ p.next_peak.amp = 0
+
+ def peaktype(p):
+ # peaktype() returns 1 if p is the most prominent peak in its neighborhood, 2
+ # otherwise. The neighborhood consists of all other peaks within rrmin.
+ # Normally, "most prominent" is equivalent to "largest in amplitude", but this
+ # is not always true. For example, consider three consecutive peaks a, b, c
+ # such that a and b share a neighborhood, b and c share a neighborhood, but a
+ # and c do not; and suppose that amp(a) > amp(b) > amp(c). In this case, if
+ # there are no other peaks, a is the most prominent peak in the (a, b)
+ # neighborhood. Since b is thus identified as a non-prominent peak, c becomes
+ # the most prominent peak in the (b, c) neighborhood. This is necessary to
+ # permit detection of low-amplitude beats that closely precede or follow beats
+ # with large secondary peaks (as, for example, in R-on-T PVCs).
+ if p.type:
+ return p.type
+ else:
+ a = p.amp
+ t0 = p.time - self.c.rrmin
+ t1 = p.time + self.c.rrmin
+
+ if t0 < 0:
+ t0 = 0
+
+ pp = p.prev_peak
+ while t0 < pp.time and pp.time < pp.next_peak.time:
+ if pp.amp == 0:
+ break
+ if a < pp.amp and peaktype(pp) == 1:
+ p.type = 2
+ return p.type
+ # end:
+ pp = pp.prev_peak
+
+ pp = p.next_peak
+ while pp.time < t1 and pp.time > pp.prev_peak.time:
+ if pp.amp == 0:
+ break
+ if a < pp.amp and peaktype(pp) == 1:
+ p.type = 2
+ return p.type
+ # end:
+ pp = pp.next_peak
+
+ p.type = 1
+ return p.type
+
+ def find_missing(r, p):
+ if r is None or p is None:
+ return None
+
+ minrrerr = p.time - r.time
+
+ s = None
+ q = r.next_peak
+ while q.time < p.time:
+ if peaktype(q) == 1:
+ rrtmp = q.time - r.time
+ rrerr = rrtmp - self.c.rrmean
+ if rrerr < 0:
+ rrerr = -rrerr
+ if rrerr < minrrerr:
+ minrrerr = rrerr
+ s = q
+ # end:
+ q = q.next_peak
+
+ return s
+
+ r = None
+ next_minute = 0
+ minutes = 0
+ while self.t <= to_sample + self.c.sps:
+ if self.countdown < 0:
+ if self.sample_valid:
+ self.qf()
+ else:
+ self.countdown = int(time_to_sample_number(1, self.c.fs))
+ self.state = "CLEANUP"
+ else:
+ self.countdown -= 1
+ if self.countdown < 0:
+ break
+
+ q0 = self.qfv_at(self.t)
+ q1 = self.qfv_at(self.t - 1)
+ q2 = self.qfv_at(self.t - 2)
+ # state == RUNNING only
+ if q1 > self.c.pthr and q2 < q1 and q1 >= q0 and self.t > self.c.dt4:
+ add_peak(self.t - 1, q1, 0)
+ last_peak = self.t - 1
+ p = self.current_peak.next_peak
+ while p.time < self.t - self.c.rtmax:
+ if p.time >= self.annot.time + self.c.rrmin and peaktype(p) == 1:
+ if p.amp > self.c.qthr:
+ rr = p.time - self.annot.time
+ q = find_missing(r, p)
+ if rr > self.c.rrmean + 2 * self.c.rrdev and \
+ rr > 2 * (self.c.rrmean - self.c.rrdev) and \
+ q is not None:
+ p = q
+ rr = p.time - self.annot.time
+ self.annot.subtype = 1
+ rrd = rr - self.c.rrmean
+ if rrd < 0:
+ rrd = -rrd
+ self.c.rrdev += (rrd - self.c.rrdev) >> 3
+ if rrd > self.c.rrinc:
+ rrd = self.c.rrinc
+ if rr > self.c.rrmean:
+ self.c.rrmean += rrd
+ else:
+ self.c.rrmean -= rrd
+ if p.amp > self.c.qthr * 4:
+ self.c.qthr += 1
+ elif p.amp < self.c.qthr:
+ self.c.qthr -= 1
+ if self.c.qthr > self.c.pthr * 20:
+ self.c.qthr = self.c.pthr * 20
+ last_qrs = p.time
+
+ if self.state == "RUNNING":
+ self.annot.time = p.time - self.c.dt2
+ self.annot.type = "NORMAL"
+ qsize = int(p.amp * 10.0 / self.c.qthr)
+ if qsize > 127:
+ qsize = 127
+ self.annot.num = qsize
+ self.putann(self.annot)
+ self.annot.time += self.c.dt2
+
+ # look for this beat's T-wave
+ tw = None
+ rtdmin = self.c.rtmean
+ q = p.next_peak
+ while q.time > self.annot.time:
+ rt = q.time - self.annot.time - self.c.dt2
+ if rt < self.c.rtmin:
+ # end:
+ q = q.next_peak
+ continue
+ if rt > self.c.rtmax:
+ break
+ rtd = rt - self.c.rtmean
+ if rtd < 0:
+ rtd = -rtd
+ if rtd < rtdmin:
+ rtdmin = rtd
+ tw = q
+ # end:
+ q = q.next_peak
+ if tw is not None:
+ tmp_time = tw.time - self.c.dt2
+ tann = Annotation(tmp_time, "TWAVE",
+ 1 if tmp_time > self.annot.time + self.c.rtmean else 0,
+ rtdmin)
+ # if self.state == "RUNNING":
+ # self.putann(tann)
+ rt = tann.time - self.annot.time
+ self.c.rtmean += (rt - self.c.rtmean) >> 4
+ if self.c.rtmean > self.c.rtmax:
+ self.c.rtmean = self.c.rtmax
+ elif self.c.rtmean < self.c.rtmin:
+ self.c.rtmean = self.c.rrmin
+ tw.type = 2 # mark T-wave as secondary
+ r = p
+ q = None
+ self.annot.subtype = 0
+ elif self.t - last_qrs > self.c.rrmax and self.c.qthr > self.c.qthmin:
+ self.c.qthr -= (self.c.qthr >> 4)
+ # end:
+ p = p.next_peak
+ elif self.t - last_peak > self.c.rrmax and self.c.pthr > self.c.pthmin:
+ self.c.pthr -= (self.c.pthr >> 4)
+
+ self.t += 1
+ if self.t >= next_minute:
+ next_minute += self.c.spm
+ minutes += 1
+ if minutes >= 60:
+ minutes = 0
+
+ if self.state == "LEARNING":
+ return
+
+ # Mark the last beat or two.
+ p = self.current_peak.next_peak
+ while p.time < p.next_peak.time:
+ if p.time >= self.annot.time + self.c.rrmin and p.time < self.tf and peaktype(p) == 1:
+ self.annot.type = "NORMAL"
+ self.annot.time = p.time
+ self.putann(self.annot)
+ # end:
+ p = p.next_peak
+
+
+def gqrs_detect(sig=None, fs=None, d_sig=None, adc_gain=None, adc_zero=None,
+ threshold=1.0, hr=75, RRdelta=0.2, RRmin=0.28, RRmax=2.4,
+ QS=0.07, QT=0.35, RTmin=0.25, RTmax=0.33,
+ QRSa=750, QRSamin=130):
+ """
+ Detect qrs locations in a single channel ecg. Functionally, a direct port
+ of the gqrs algorithm from the original wfdb package. Accepts either a
+ physical signal, or a digital signal with known adc_gain and adc_zero.
+
+ See the notes below for a summary of the program. This algorithm is not
+ being developed/supported.
+
+ Parameters
+ ----------
+ sig : 1d numpy array, optional
+ The input physical signal. The detection algorithm which replicates
+ the original, works using digital samples, and this physical option is
+ provided as a convenient interface. If this is the specified input
+ signal, automatic adc is performed using 24 bit precision, to obtain
+ the `d_sig`, `adc_gain`, and `adc_zero` parameters. There may be minor
+ differences in detection results (ie. an occasional 1 sample
+ difference) between using `sig` and `d_sig`. To replicate the exact
+ output of the original gqrs algorithm, use the `d_sig` argument
+ instead.
+ fs : int, or float
+ The sampling frequency of the signal.
+ d_sig : 1d numpy array, optional
+ The input digital signal. If this is the specified input signal rather
+ than `sig`, the `adc_gain` and `adc_zero` parameters must be specified.
+ adc_gain : int, or float, optional
+ The analogue to digital gain of the signal (the number of adus per
+ physical unit).
+ adc_zero : int, optional
+ The value produced by the ADC given a 0 volt input.
+ threshold : int, or float, optional
+ The relative amplitude detection threshold. Used to initialize the peak
+ and qrs detection threshold.
+ hr : int, or float, optional
+ Typical heart rate, in beats per minute.
+ RRdelta : int or float, optional
+ Typical difference between successive RR intervals in seconds.
+ RRmin : int or float, optional
+ Minimum RR interval ("refractory period"), in seconds.
+ RRmax : int or float, optional
+ Maximum RR interval, in seconds. Thresholds will be adjusted if no
+ peaks are detected within this interval.
+ QS : int or float, optional
+ Typical QRS duration, in seconds.
+ QT : int or float, optional
+ Typical QT interval, in seconds.
+ RTmin : int or float, optional
+ Minimum interval between R and T peaks, in seconds.
+ RTmax : int or float, optional
+ Maximum interval between R and T peaks, in seconds.
+ QRSa : int or float, optional
+ Typical QRS peak-to-peak amplitude, in microvolts.
+ QRSamin : int or float, optional
+ Minimum QRS peak-to-peak amplitude, in microvolts.
+
+ Returns
+ -------
+ qrs_locs : numpy array
+ Detected qrs locations
+
+
+ Notes
+ -----
+ This function should not be used for signals with fs <= 50Hz
+
+ The algorithm theoretically works as follows:
+ - Load in configuration parameters. They are used to set/initialize the:
+ - allowed rr interval limits (fixed)
+ - initial recent rr interval (running)
+ - qrs width, used for detection filter widths (fixed)
+ - allowed rt interval limits (fixed)
+ - initial recent rt interval (running)
+ - initial peak amplitude detection threshold (running)
+ - initial qrs amplitude detection threshold (running)
+ **Note** that this algorithm does not normalize signal amplitudes, and
+ hence is highly dependent on configuration amplitude parameters.
+ - Apply trapezoid low-pass filtering to the signal
+ - Convolve a QRS matched filter with the filtered signal
+ - Run the learning phase using a calculated signal length: detect qrs and
+ non-qrs peaks as in the main detection phase, without saving the qrs
+ locations. During this phase, running parameters of recent intervals
+ and peak/qrs thresholds are adjusted.
+ - Run the detection::
+ if a sample is bigger than its immediate neighbors and larger
+ than the peak detection threshold, it is a peak.
+ if it is further than RRmin from the previous qrs, and is a
+ *primary peak.
+ if it is further than 2 standard deviations from the
+ previous qrs, do a backsearch for a missed low amplitude
+ beat
+ return the primary peak between the current sample
+ and the previous qrs if any.
+ if it surpasses the qrs threshold, it is a qrs complex
+ save the qrs location.
+ update running rr and qrs amplitude parameters.
+ look for the qrs complex's t-wave and mark it if
+ found.
+ else if it is not a peak
+ lower the peak detection threshold if the last peak found
+ was more than RRmax ago, and not already at its minimum.
+
+ *A peak is secondary if there is a larger peak within its neighborhood
+ (time +- rrmin), or if it has been identified as a T-wave associated with a
+ previous primary peak. A peak is primary if it is largest in its neighborhood,
+ or if the only larger peaks are secondary.
+
+ The above describes how the algorithm should theoretically work, but there
+ are bugs which make the program contradict certain parts of its supposed
+ logic. A list of issues from the original c code, and hence this python
+ implementation, can be found here:
+
+ https://github.com/bemoody/wfdb/issues/17
+
+ gqrs will not be supported/developed in this library.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import wfdb
+ >>> from wfdb import processing
+
+ >>> # Detect using a physical input signal
+ >>> record = wfdb.rdrecord('sample-data/100', channels=[0])
+ >>> qrs_locs = processing.gqrs_detect(record.p_signal[:,0], fs=record.fs)
+
+ >>> # Detect using a digital input signal
+ >>> record_2 = wfdb.rdrecord('sample-data/100', channels=[0], physical=False)
+ >>> qrs_locs_2 = processing.gqrs_detect(d_sig=record_2.d_signal[:,0],
+ fs=record_2.fs,
+ adc_gain=record_2.adc_gain[0],
+ adc_zero=record_2.adc_zero[0])
+
+ """
+ # Perform adc if input signal is physical
+ if sig is not None:
+ record = Record(p_signal=sig.reshape([-1,1]), fmt=['24'])
+ record.set_d_features(do_adc=True)
+ d_sig = record.d_signal[:,0]
+ adc_zero = 0
+ adc_gain = record.adc_gain[0]
+
+ conf = Conf(fs=fs, adc_gain=adc_gain, hr=hr, RRdelta=RRdelta, RRmin=RRmin,
+ RRmax=RRmax, QS=QS, QT=QT, RTmin=RTmin, RTmax=RTmax, QRSa=QRSa,
+ QRSamin=QRSamin, thresh=threshold)
+ gqrs = GQRS()
+
+ annotations = gqrs.detect(x=d_sig, conf=conf, adc_zero=adc_zero)
+
+ return np.array([a.time for a in annotations])
diff --git a/wfdb/readwrite/_headers.py b/wfdb/readwrite/_headers.py
deleted file mode 100644
index e2b64e27..00000000
--- a/wfdb/readwrite/_headers.py
+++ /dev/null
@@ -1,678 +0,0 @@
-import numpy as np
-import re
-import os
-from collections import OrderedDict
-from calendar import monthrange
-from . import _signals
-from . import downloads
-
-# Class of common methods for single and multi-segment headers
-class BaseHeadersMixin(object):
-
- # Helper function for getwritefields
- # specfields is the set of specification fields
- # For record specs, it returns a list of all fields needed.
- # For signal specs, it returns a dictionary of all fields needed,
- # with keys = field and value = list of 1 or 0 indicating channel for the field
- def getwritesubset(self, specfields):
-
- # record specification fields
- if specfields == 'record':
- writefields=[]
- fieldspecs = OrderedDict(reversed(list(recfieldspecs.items())))
- # Remove this requirement for single segs
- if not hasattr(self, 'nseg'):
- del(fieldspecs['nseg'])
-
- for f in fieldspecs:
- if f in writefields:
- continue
- # If the field is required by default or has been defined by the user
- if fieldspecs[f].write_req or getattr(self, f) is not None:
- rf=f
- # Add the field and its recursive dependencies
- while rf is not None:
- writefields.append(rf)
- rf=fieldspecs[rf].dependency
- # Add comments if any
- if getattr(self, 'comments') is not None:
- writefields.append('comments')
-
- # signal spec field. Need to return a potentially different list for each channel.
- elif specfields == 'signal':
- # List of lists for each channel
- writefields=[]
-
- allwritefields=[]
- fieldspecs = OrderedDict(reversed(list(sigfieldspecs.items())))
-
- for ch in range(self.nsig):
- # The fields needed for this channel
- writefieldsch = []
- for f in fieldspecs:
- if f in writefieldsch:
- continue
-
- fielditem = getattr(self, f)
- # If the field is required by default or has been defined by the user
- if fieldspecs[f].write_req or (fielditem is not None and fielditem[ch] is not None):
- rf=f
- # Add the field and its recursive dependencies
- while rf is not None:
- writefieldsch.append(rf)
- rf=fieldspecs[rf].dependency
-
- writefields.append(writefieldsch)
-
- # Convert the list of lists to a single dictionary.
- # keys = field and value = list of 1 or 0 indicating channel for the field
- dictwritefields = {}
-
- # For fields present in any channel:
- for f in set([i for wsub in writefields for i in wsub]):
- dictwritefields[f] = [0]*self.nsig
-
- for ch in range(self.nsig):
- if f in writefields[ch]:
- dictwritefields[f][ch] = 1
-
- writefields = dictwritefields
-
-
- return writefields
-
-
-# Class with single-segment header methods
-# To be inherited by WFDBrecord from records.py.
-class HeadersMixin(BaseHeadersMixin):
-
- def setdefaults(self):
- """
- Set defaults for fields needed to write the header if they have defaults.
- This is NOT called by rdheader. It is only automatically called by the gateway wrsamp for convenience.
- It is also not called by wrhea (this may be changed in the future) since
- it is supposed to be an explicit function.
-
- Not responsible for initializing the
- attributes. That is done by the constructor.
- """
- rfields, sfields = self.getwritefields()
- for f in rfields:
- self.setdefault(f)
- for f in sfields:
- self.setdefault(f)
-
- # Write a wfdb header file. The signals or segments fields are not used.
- def wrheader(self):
-
- # Get all the fields used to write the header
- recwritefields, sigwritefields = self.getwritefields()
-
- # Check the validity of individual fields used to write the header
-
- # Record specification fields (and comments)
- for f in recwritefields:
- self.checkfield(f)
-
- # Signal specification fields.
- for f in sigwritefields:
- self.checkfield(f, sigwritefields[f])
-
- # Check the cohesion of fields used to write the header
- self.checkfieldcohesion(recwritefields, list(sigwritefields))
-
- # Write the header file using the specified fields
- self.wrheaderfile(recwritefields, sigwritefields)
-
-
- # Get the list of fields used to write the header. (Does NOT include d_signals or e_d_signals.)
- # Separate items by record and signal specification field.
- # Returns the default required fields, the user defined fields, and their dependencies.
- # recwritefields includes 'comment' if present.
- def getwritefields(self):
-
- # Record specification fields
- recwritefields=self.getwritesubset('record')
-
- # Add comments if any
- if self.comments != None:
- recwritefields.append('comments')
-
- # Determine whether there are signals. If so, get their required fields.
- self.checkfield('nsig')
- if self.nsig>0:
- sigwritefields=self.getwritesubset('signal')
- else:
- sigwritefields = None
-
- return recwritefields, sigwritefields
-
- # Set the object's attribute to its default value if it is missing
- # and there is a default. Not responsible for initializing the
- # attribute. That is done by the constructor.
- def setdefault(self, field):
-
- # Record specification fields
- if field in recfieldspecs:
- # Return if no default to set, or if the field is already present.
- if recfieldspecs[field].write_def is None or getattr(self, field) is not None:
- return
- setattr(self, field, recfieldspecs[field].write_def)
-
- # Signal specification fields
- # Setting entire list default, not filling in blanks in lists.
- elif field in sigfieldspecs:
-
- # Specific dynamic case
- if field == 'filename' and self.filename is None:
- self.filename = self.nsig*[self.recordname+'.dat']
- return
-
- item = getattr(self, field)
-
- # Return if no default to set, or if the field is already present.
- if sigfieldspecs[field].write_def is None or item is not None:
- return
-
- # Set more specific defaults if possible
- if field == 'adcres' and self.fmt is not None:
- self.adcres=_signals.wfdbfmtres(self.fmt)
- return
-
- setattr(self, field, [sigfieldspecs[field].write_def]*self.nsig)
-
- # Check the cohesion of fields used to write the header
- def checkfieldcohesion(self, recwritefields, sigwritefields):
-
- # If there are no signal specification fields, there is nothing to check.
- if self.nsig>0:
-
- # The length of all signal specification fields must match nsig
- # even if some of its elements are None.
- for f in sigwritefields:
- if len(getattr(self, f)) != self.nsig:
- raise ValueError('The length of field: '+f+' must match field nsig.')
-
- # Each filename must correspond to only one fmt, (and only one byte offset if defined).
- datfmts = {}
- for ch in range(self.nsig):
- if self.filename[ch] not in datfmts:
- datfmts[self.filename[ch]] = self.fmt[ch]
- else:
- if datfmts[self.filename[ch]] != self.fmt[ch]:
- raise ValueError('Each filename (dat file) specified must have the same fmt')
-
- datoffsets = {}
- if self.byteoffset is not None:
- # At least one byte offset value exists
- for ch in range(self.nsig):
- if self.byteoffset[ch] is None:
- continue
- if self.filename[ch] not in datoffsets:
- datoffsets[self.filename[ch]] = self.byteoffset[ch]
- else:
- if datoffsets[self.filename[ch]] != self.byteoffset[ch]:
- raise ValueError('Each filename (dat file) specified must have the same byte offset')
-
-
- # Write a header file using the specified fields
- def wrheaderfile(self, recwritefields, sigwritefields):
-
- headerlines=[]
-
- # Create record specification line
- recordline = ''
- # Traverse the ordered dictionary
- for field in recfieldspecs:
- # If the field is being used, add it with its delimiter
- if field in recwritefields:
- stringfield = str(getattr(self, field))
- # If fs is float, check whether it as an integer
- if field == 'fs' and isinstance(self.fs, float):
- if round(self.fs, 8) == float(int(self.fs)):
- stringfield = str(int(self.fs))
- recordline = recordline + recfieldspecs[field].delimiter + stringfield
- headerlines.append(recordline)
-
- # Create signal specification lines (if any) one channel at a time
- if self.nsig>0:
- signallines = self.nsig*['']
- for ch in range(self.nsig):
- # Traverse the ordered dictionary
- for field in sigfieldspecs:
- # If the field is being used, add each of its elements with the delimiter to the appropriate line
- if field in sigwritefields and sigwritefields[field][ch]:
- signallines[ch]=signallines[ch] + sigfieldspecs[field].delimiter + str(getattr(self, field)[ch])
- # The 'baseline' field needs to be closed with ')'
- if field== 'baseline':
- signallines[ch]=signallines[ch] +')'
-
- headerlines = headerlines + signallines
-
- # Create comment lines (if any)
- if 'comments' in recwritefields:
- commentlines = ['# '+comment for comment in self.comments]
- headerlines = headerlines + commentlines
-
- linestofile(self.recordname+'.hea', headerlines)
-
-
-
-# Class with multi-segment header methods
-# To be inherited by WFDBmultirecord from records.py.
-class MultiHeadersMixin(BaseHeadersMixin):
-
- # Set defaults for fields needed to write the header if they have defaults.
- # This is NOT called by rdheader. It is only called by the gateway wrsamp for convenience.
- # It is also not called by wrhea (this may be changed in the future) since
- # it is supposed to be an explicit function.
-
- # Not responsible for initializing the
- # attribute. That is done by the constructor.
- def setdefaults(self):
- for field in self.getwritefields():
- self.setdefault(field)
-
- # Write a wfdb header file. The signals or segments fields are not used.
- def wrheader(self):
-
- # Get all the fields used to write the header
- writefields = self.getwritefields()
-
- # Check the validity of individual fields used to write the header
- for f in writefields:
- self.checkfield(f)
-
- # Check the cohesion of fields used to write the header
- self.checkfieldcohesion()
-
- # Write the header file using the specified fields
- self.wrheaderfile(writefields)
-
-
- # Get the list of fields used to write the multi-segment header.
- # Returns the default required fields, the user defined fields, and their dependencies.
- def getwritefields(self):
-
- # Record specification fields
- writefields=self.getwritesubset('record')
-
- # Segment specification fields are all mandatory
- writefields = writefields + ['segname', 'seglen']
-
- # Comments
- if self.comments !=None:
- writefields.append('comments')
- return writefields
-
- # Set a field to its default value if there is a default.
- def setdefault(self, field):
-
- # Record specification fields
- if field in recfieldspecs:
- # Return if no default to set, or if the field is already present.
- if recfieldspecs[field].write_def is None or getattr(self, field) is not None:
- return
- setattr(self, field, recfieldspecs[field].write_def)
-
-
-
- # Check the cohesion of fields used to write the header
- def checkfieldcohesion(self):
-
- # The length of segname and seglen must match nseg
- for f in ['segname', 'seglen']:
- if len(getattr(self, f)) != self.nseg:
- raise ValueError('The length of field: '+f+' does not match field nseg.')
-
- # Check the sum of the 'seglen' fields against 'siglen'
- if np.sum(self.seglen) != self.siglen:
- raise ValueError("The sum of the 'seglen' fields do not match the 'siglen' field")
-
-
- # Write a header file using the specified fields
- def wrheaderfile(self, writefields):
-
- headerlines=[]
-
- # Create record specification line
- recordline = ''
- # Traverse the ordered dictionary
- for field in recfieldspecs:
- # If the field is being used, add it with its delimiter
- if field in writefields:
- recordline = recordline + recfieldspecs[field].delimiter + str(getattr(self, field))
- headerlines.append(recordline)
-
- # Create segment specification lines
- segmentlines = self.nseg*['']
- # For both fields, add each of its elements with the delimiter to the appropriate line
- for field in ['segname', 'segname']:
- for segnum in range(0, self.nseg):
- segmentlines[segnum] = segmentlines[segnum] + segfieldspecs[field].delimiter + str(getattr(self, field)[segnum])
-
- headerlines = headerlines + segmentlines
-
- # Create comment lines (if any)
- if 'comments' in writefields:
- commentlines = ['# '+comment for comment in self.comments]
- headerlines = headerlines + commentlines
-
- linestofile(self.recordname+'.hea', headerlines)
-
- # Get a list of the segment numbers that contain a particular signal
- # (or a dictionary of segment numbers for a list of signals)
- # Only works if information about the segments has been read in
- def getsigsegments(self, signame=None):
- if self.segments is None:
- raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rdsegments=True")
-
- # Default value = all signal names.
- if signame is None:
- signame = self.getsignames()
-
- if isinstance(signame, list):
- sigdict = {}
- for sig in signame:
- sigdict[sig] = self.getsigsegments(sig)
- return sigdict
- elif isinstance(signame, str):
- sigsegs = []
- for i in range(self.nseg):
- if self.segname[i] != '~' and signame in self.segments[i].signame:
- sigsegs.append(i)
- return sigsegs
- else:
- raise TypeError('signame must be a string or a list of strings')
-
- # Get the signal names for the entire record
- def getsignames(self):
- if self.segments is None:
- raise Exception("The MultiRecord's segments must be read in before this method is called. ie. Call rdheader() with rdsegments=True")
-
- if self.layout == 'Fixed':
- for i in range(self.nseg):
- if self.segname[i] != '~':
- signame = self.segments[i].signame
- break
- else:
- signame = self.segments[0].signame
-
- return signame
-
-
-# Regexp objects for reading headers
-
-# Record Line Fields
-rxRECORD = re.compile(
- ''.join(
- [
- "(?P[-\w]+)/?(?P\d*)[ \t]+",
- "(?P\d+)[ \t]*",
- "(?P\d*\.?\d*)/*(?P\d*\.?\d*)\(?(?P\d*\.?\d*)\)?[ \t]*",
- "(?P\d*)[ \t]*",
- "(?P\d*:?\d{,2}:?\d{,2}\.?\d*)[ \t]*",
- "(?P\d{,2}/?\d{,2}/?\d{,4})"]))
-
-# Signal Line Fields
-rxSIGNAL = re.compile(
- ''.join(
- [
- "(?P[-\w]+\.?[\w]*~?)[ \t]+(?P\d+)x?"
- "(?P\d*):?(?P\d*)\+?(?P\d*)[ \t]*",
- "(?P-?\d*\.?\d*e?[\+-]?\d*)\(?(?P-?\d*)\)?/?(?P[\w\^\-\?%]*)[ \t]*",
- "(?P\d*)[ \t]*(?P-?\d*)[ \t]*(?P-?\d*)[ \t]*",
- "(?P-?\d*)[ \t]*(?P\d*)[ \t]*(?P[\S]?[^\t\n\r\f\v]*)"]))
-
-# Segment Line Fields
-rxSEGMENT = re.compile('(?P\w*~?)[ \t]+(?P\d+)')
-
-
-# Read header file to get comment and non-comment lines
-def getheaderlines(recordname, pbdir):
- # Read local file
- if pbdir is None:
- with open(recordname + ".hea", 'r') as fp:
- # Record line followed by signal/segment lines if any
- headerlines = []
- # Comment lines
- commentlines = []
- for line in fp:
- line = line.strip()
- # Comment line
- if line.startswith('#'):
- commentlines.append(line)
- # Non-empty non-comment line = header line.
- elif line:
- # Look for a comment in the line
- ci = line.find('#')
- if ci > 0:
- headerlines.append(line[:ci])
- # comment on same line as header line
- commentlines.append(line[ci:])
- else:
- headerlines.append(line)
- # Read online header file
- else:
- headerlines, commentlines = downloads.streamheader(recordname, pbdir)
-
- return headerlines, commentlines
-
-
-# Extract fields from a record line string into a dictionary
-def read_rec_line(recline):
-
- # Dictionary for record fields
- d_rec = {}
-
- # Read string fields from record line
- (d_rec['recordname'], d_rec['nseg'], d_rec['nsig'], d_rec['fs'],
- d_rec['counterfreq'], d_rec['basecounter'], d_rec['siglen'],
- d_rec['basetime'], d_rec['basedate']) = re.findall(rxRECORD, recline)[0]
-
- for field in recfieldspecs:
- # Replace empty strings with their read defaults (which are mostly None)
- if d_rec[field] == '':
- d_rec[field] = recfieldspecs[field].read_def
- # Typecast non-empty strings for numerical fields
- else:
- if recfieldspecs[field].allowedtypes is inttypes:
- d_rec[field] = int(d_rec[field])
- # fs may be read as float or int
- elif field == 'fs':
- fs = float(d_rec['fs'])
- if round(fs, 8) == float(int(fs)):
- fs = int(fs)
- d_rec['fs'] = fs
-
- return d_rec
-
-# Extract fields from signal line strings into a dictionary
-def read_sig_lines(siglines):
- # Dictionary for signal fields
- d_sig = {}
-
- # Each dictionary field is a list
- for field in sigfieldspecs:
- d_sig[field] = [None]*len(siglines)
-
- # Read string fields from signal line
- for i in range(0, len(siglines)):
- (d_sig['filename'][i], d_sig['fmt'][i],
- d_sig['sampsperframe'][i],
- d_sig['skew'][i],
- d_sig['byteoffset'][i],
- d_sig['adcgain'][i],
- d_sig['baseline'][i],
- d_sig['units'][i],
- d_sig['adcres'][i],
- d_sig['adczero'][i],
- d_sig['initvalue'][i],
- d_sig['checksum'][i],
- d_sig['blocksize'][i],
- d_sig['signame'][i]) = rxSIGNAL.findall(siglines[i])[0]
-
- for field in sigfieldspecs:
- # Replace empty strings with their read defaults (which are mostly None)
- # Note: Never set a field to None. [None]* nsig is accurate, indicating
- # that different channels can be present or missing.
- if d_sig[field][i] == '':
- d_sig[field][i] = sigfieldspecs[field].read_def
-
- # Special case: missing baseline defaults to ADCzero if present
- if field == 'baseline' and d_sig['adczero'][i] != '':
- d_sig['baseline'][i] = int(d_sig['adczero'][i])
- # Typecast non-empty strings for numerical fields
- else:
- if sigfieldspecs[field].allowedtypes is inttypes:
- d_sig[field][i] = int(d_sig[field][i])
- elif sigfieldspecs[field].allowedtypes is floattypes:
- d_sig[field][i] = float(d_sig[field][i])
- # Special case: gain of 0 means 200
- if field == 'adcgain' and d_sig['adcgain'][i] == 0:
- d_sig['adcgain'][i] = 200.
-
- return d_sig
-
-
-# Extract fields from segment line strings into a dictionary
-def read_seg_lines(seglines):
-
- # Dictionary for signal fields
- d_seg = {}
-
- # Each dictionary field is a list
- for field in segfieldspecs:
- d_seg[field] = [None]*len(seglines)
-
- # Read string fields from signal line
- for i in range(0, len(seglines)):
- (d_seg['segname'][i], d_seg['seglen'][i]) = rxSEGMENT.findall(seglines[i])[0]
-
- for field in segfieldspecs:
- # Replace empty strings with their read defaults (which are mostly None)
- if d_seg[field][i] == '':
- d_seg[field][i] = segfieldspecs[field].read_def
- # Typecast non-empty strings for numerical field
- else:
- if field == 'seglen':
- d_seg[field][i] = int(d_seg[field][i])
-
- return d_seg
-
-# Write each line in a list of strings to a text file
-def linestofile(filename, lines):
- f = open(filename,'w')
- for l in lines:
- f.write("%s\n" % l)
- f.close()
-
-
-# Specifications of WFDB header fields.
-class WFDBheaderspecs():
-
- def __init__(self, allowedtypes, delimiter, dependency, write_req, read_def, write_def):
- # Data types the field (or its elements) can be
- self.allowedtypes = allowedtypes
- # The text delimiter that precedes the field if it is a field that gets written to header files.
- self.delimiter = delimiter
- # The required/dependent field which must also be present
- self.dependency = dependency
- # Whether the field is always required for writing a header (more stringent than the original WFDB library)
- self.write_req = write_req
- # The default value for the field when read if any
- self.read_def = read_def
- # The default value for the field to fill in before writing if any
- self.write_def = write_def
-
- # The read vs write default values are different for 2 reasons:
- # 1. We want to force the user to be explicit with certain important
- # fields when writing WFDB records fields, without affecting
- # existing WFDB headers when reading.
- # 2. Certain unimportant fields may be dependencies of other
- # important fields. When writing, we want to fill in defaults
- # so that the user doesn't need to. But when reading, it should
- # be clear that the fields are missing.
-
-inttypes = (int, np.int64, np.int32, np.int16, np.int8)
-floattypes = inttypes + (float, np.float64, np.float32)
-int_dtypes = ('int64', 'uint64', 'int32', 'uint32','int16','uint16')
-
-# Record specification fields
-recfieldspecs = OrderedDict([('recordname', WFDBheaderspecs((str), '', None, True, None, None)),
- ('nseg', WFDBheaderspecs(inttypes, '/', 'recordname', True, None, None)),
- ('nsig', WFDBheaderspecs(inttypes, ' ', 'recordname', True, None, None)),
- ('fs', WFDBheaderspecs(floattypes, ' ', 'nsig', True, 250, None)),
- ('counterfreq', WFDBheaderspecs(floattypes, '/', 'fs', False, None, None)),
- ('basecounter', WFDBheaderspecs(floattypes, '(', 'counterfreq', False, None, None)),
- ('siglen', WFDBheaderspecs(inttypes, ' ', 'fs', True, None, None)),
- ('basetime', WFDBheaderspecs((str), ' ', 'siglen', False, None, '00:00:00')),
- ('basedate', WFDBheaderspecs((str), ' ', 'basetime', False, None, None))])
-
-# Signal specification fields.
-sigfieldspecs = OrderedDict([('filename', WFDBheaderspecs((str), '', None, True, None, None)),
- ('fmt', WFDBheaderspecs((str), ' ', 'filename', True, None, None)),
- ('sampsperframe', WFDBheaderspecs(inttypes, 'x', 'fmt', False, 1, None)),
- ('skew', WFDBheaderspecs(inttypes, ':', 'fmt', False, None, None)),
- ('byteoffset', WFDBheaderspecs(inttypes, '+', 'fmt', False, None, None)),
- ('adcgain', WFDBheaderspecs(floattypes, ' ', 'fmt', True, 200., None)),
- ('baseline', WFDBheaderspecs(inttypes, '(', 'adcgain', True, 0, None)),
- ('units', WFDBheaderspecs((str), '/', 'adcgain', True, 'mV', None)),
- ('adcres', WFDBheaderspecs(inttypes, ' ', 'adcgain', False, None, 0)),
- ('adczero', WFDBheaderspecs(inttypes, ' ', 'adcres', False, None, 0)),
- ('initvalue', WFDBheaderspecs(inttypes, ' ', 'adczero', False, None, None)),
- ('checksum', WFDBheaderspecs(inttypes, ' ', 'initvalue', False, None, None)),
- ('blocksize', WFDBheaderspecs(inttypes, ' ', 'checksum', False, None, 0)),
- ('signame', WFDBheaderspecs((str), ' ', 'blocksize', False, None, None))])
-
-# Segment specification fields.
-segfieldspecs = OrderedDict([('segname', WFDBheaderspecs((str), '', None, True, None, None)),
- ('seglen', WFDBheaderspecs(inttypes, ' ', 'segname', True, None, None))])
-
-
-
-# ---------- For storing WFDB Signal definitions ---------- #
-
-
-# Unit scales used for default display scales.
-unitscale = {
- 'Voltage': ['pV', 'nV', 'uV', 'mV', 'V', 'kV'],
- 'Temperature': ['C'],
- 'Pressure': ['mmHg'],
-}
-
-
-
-# Signal class with all its parameters
-class SignalClass(object):
- def __init__(self, abbreviation, description, signalnames):
- self.abbreviation = abbreviation
- self.description = description
- # names that are assigned to this signal type
- self.signalnames = signalnames
-
- def __str__(self):
- return self.abbreviation
-
-# All signal types. Make sure signal names are in lower case.
-sig_classes = [
- SignalClass('BP', 'Blood Pressure', ['bp','abp','pap','cvp',]),
- SignalClass('CO2', 'Carbon Dioxide', ['co2']),
- SignalClass('CO', 'Carbon Monoxide', ['co']),
- SignalClass('ECG', 'Electrocardiogram', ['i','ii','iii','iv','v','avr']),
- SignalClass('EEG', 'Electroencephalogram',['eeg']),
- SignalClass('EMG', 'Electromyograph', ['emg']),
- SignalClass('EOG', 'Electrooculograph', ['eog']),
- SignalClass('HR', 'Heart Rate', ['hr']),
- SignalClass('MMG', 'Magnetomyograph', ['mmg']),
- SignalClass('O2', 'Oxygen', ['o2','sp02']),
- SignalClass('PLETH', 'Plethysmograph', ['pleth']),
- SignalClass('RESP', 'Respiration', ['resp']),
- SignalClass('SCG', 'Seismocardiogram', ['scg']),
- SignalClass('STAT', 'Status', ['stat','status']), # small integers indicating status
- SignalClass('ST', 'ECG ST Segment', ['st']),
- SignalClass('TEMP', 'Temperature', ['temp']),
- SignalClass('UNKNOWN', 'Unknown Class', []),
-]
-
-
diff --git a/wfdb/readwrite/downloads.py b/wfdb/readwrite/downloads.py
deleted file mode 100644
index fc38c014..00000000
--- a/wfdb/readwrite/downloads.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import numpy as np
-import re
-import os
-import posixpath
-import requests
-
-# Read a header file from physiobank
-def streamheader(recordname, pbdir):
-
- # Full url of header location
- url = posixpath.join(dbindexurl, pbdir, recordname+'.hea')
- r = requests.get(url)
-
- # Raise HTTPError if invalid url
- r.raise_for_status()
-
- # Get each line as a string
- filelines = r.content.decode('iso-8859-1').splitlines()
-
- # Separate content into header and comment lines
- headerlines = []
- commentlines = []
-
- for line in filelines:
- line = str(line.strip())
- # Comment line
- if line.startswith('#'):
- commentlines.append(line)
- # Non-empty non-comment line = header line.
- elif line:
- # Look for a comment in the line
- ci = line.find('#')
- if ci > 0:
- headerlines.append(line[:ci])
- # comment on same line as header line
- commentlines.append(line[ci:])
- else:
- headerlines.append(line)
-
- return (headerlines, commentlines)
-
-# Read certain bytes from a dat file from physiobank
-def streamdat(filename, pbdir, fmt, bytecount, startbyte, datatypes):
-
- # Full url of dat file
- url = posixpath.join(dbindexurl, pbdir, filename)
-
- # Specify the byte range
- endbyte = startbyte + bytecount-1
- headers = {"Range": "bytes="+str(startbyte)+"-"+str(endbyte), 'Accept-Encoding': '*/*'}
-
- # Get the content
- r = requests.get(url, headers=headers, stream=True)
-
- # Raise HTTPError if invalid url
- r.raise_for_status()
-
- sigbytes = r.content
-
- # Convert to numpy array
- sigbytes = np.fromstring(sigbytes, dtype = np.dtype(datatypes[fmt]))
-
- return sigbytes
-
-# Read an entire annotation file from physiobank
-def streamannotation(filename, pbdir):
-
- # Full url of annotation file
- url = posixpath.join(dbindexurl, pbdir, filename)
-
- # Get the content
- r = requests.get(url)
- # Raise HTTPError if invalid url
- r.raise_for_status()
-
- annbytes = r.content
-
- # Convert to numpy array
- annbytes = np.fromstring(annbytes, dtype = np.dtype('<u1'))
- elif localfilesize > onlinefilesize:
- dlfullfile(url, localfile)
- # If they're the same size, do nothing.
-
- # The file doesn't exist. Download it.
- else:
- dlfullfile(url, localfile)
-
- return
-
-# Download a file. No checks.
-def dlfullfile(url, localfile):
- r = requests.get(url)
- with open(localfile, "wb") as writefile:
- writefile.write(r.content)
-
- return
-
-
-
-
-dbindexurl = 'http://physionet.org/physiobank/database/'
diff --git a/wfdb/readwrite/records.py b/wfdb/readwrite/records.py
deleted file mode 100644
index 4a7a96b6..00000000
--- a/wfdb/readwrite/records.py
+++ /dev/null
@@ -1,1373 +0,0 @@
- # For wrheader(), all fields must be already filled in and cohesive with one another. The signals field will not be used.
-# For wrsamp(), the field to use will be d_signals (which is allowed to be empty for 0 channel records).
-# set_p_features and set_d_features use characteristics of the p_signals or d_signals field to fill in other header fields.
-# These are separate from another method 'setdefaults' which the user may call to set default header fields
-# The checkfieldcohesion() function will be called in wrheader which checks all the header fields.
-# The checksignalcohesion() function will be called in wrsamp in wrdat to check the d_signal against the header fields.
-
-import numpy as np
-import re
-import os
-import posixpath
-from collections import OrderedDict
-from calendar import monthrange
-import requests
-import multiprocessing
-from . import _headers
-from . import _signals
-from . import downloads
-
-
-# The base WFDB class to extend to create Record and MultiRecord. Contains shared helper functions and fields.
-class BaseRecord(object):
- # Constructor
- def __init__(self, recordname=None, nsig=None,
- fs=None, counterfreq=None, basecounter = None,
- siglen = None, basetime = None, basedate = None,
- comments = None, signame=None):
- self.recordname = recordname
- self.nsig = nsig
- self.fs = fs
- self.counterfreq = counterfreq
- self.basecounter = basecounter
- self.siglen = siglen
- self.basetime = basetime
- self.basedate = basedate
- self.comments = comments
- self.signame = signame
-
- # Check whether a single field is valid in its basic form. Does not check compatibility with other fields.
- # ch is only used for signal specification fields, specifying the channels to check. Other channels
- # can be None.
- # Be aware that this function is not just called from wrheader.
- def checkfield(self, field, channels=None):
- # Check that the field is present
- if getattr(self, field) is None:
- raise Exception("Missing field required: "+field)
-
- # Check the type of the field (and of its elements if it should be a list)
- self.checkfieldtype(field, channels)
-
- # Expand to make sure all channels must have present field
- if channels == 'all':
- channels = [1]*len(getattr(self, field))
-
- # Individual specific field checks:
- if field == 'd_signals':
- # Check shape
- if self.d_signals.ndim != 2:
- raise TypeError("d_signals must be a 2d numpy array")
- # Check dtype
- if self.d_signals.dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
- raise TypeError('d_signals must be a 2d numpy array with dtype == int64, int32, int16, or int8.')
- elif field =='p_signals':
- # Check shape
- if self.p_signals.ndim != 2:
- raise TypeError("p_signals must be a 2d numpy array")
-
- elif field == 'e_d_signals':
- # Check shape
- for ch in range(len(channels)):
- if self.e_d_signals[ch].ndim != 1:
- raise TypeError("e_d_signals must be a list of 1d numpy arrays")
- # Check dtype
- if self.e_d_signals[ch].dtype not in [np.dtype('int64'), np.dtype('int32'), np.dtype('int16'), np.dtype('int8')]:
- raise TypeError('e_d_d_signals must be a list of 1d numpy arrays with dtype == int64, int32, int16, or int8.')
- elif field =='e_p_signals':
- # Check shape
- for ch in range(0, len(channels)):
- if self.e_p_signals.ndim != 1:
- raise TypeError("e_p_signals must be a list of 1d numpy arrays")
-
- #elif field == 'segments': # Nothing to check here.
- # Record specification fields
- elif field == 'recordname':
- # Allow letters, digits, hyphens, and underscores.
- acceptedstring = re.match('[-\w]+', self.recordname)
- if not acceptedstring or acceptedstring.string != self.recordname:
- raise ValueError('recordname must only comprise of letters, digits, hyphens, and underscores.')
- elif field == 'nseg':
- if self.nseg <=0:
- raise ValueError('nseg must be a positive integer')
- elif field == 'nsig':
- if self.nsig <=0:
- raise ValueError('nsig must be a positive integer')
- elif field == 'fs':
- if self.fs<=0:
- raise ValueError('fs must be a positive number')
- elif field == 'counterfreq':
- if self.counterfreq <=0:
- raise ValueError('counterfreq must be a positive number')
- elif field == 'basecounter':
- if self.basecounter <=0:
- raise ValueError('basecounter must be a positive number')
- elif field == 'siglen':
- if self.siglen <0:
- raise ValueError('siglen must be a non-negative integer')
- elif field == 'basetime':
- _ = parsetimestring(self.basetime)
- elif field == 'basedate':
- _ = parsedatestring(self.basedate)
-
- # Signal specification fields. Lists of elements to check.
- elif field in _headers.sigfieldspecs:
-
- for ch in range(0, len(channels)):
- f = getattr(self, field)[ch]
-
- # The channel element is allowed to be None
- if not channels[ch]:
- if f is None:
- continue
-
- if field == 'filename':
- # Check for filename characters
- acceptedstring = re.match('[-\w]+\.?[\w]+',f)
- if not acceptedstring or acceptedstring.string != f:
- raise ValueError('File names should only contain alphanumerics, hyphens, and an extension. eg. record_100.dat')
- # Check that dat files are grouped together
- if orderedsetlist(self.filename)[0] != orderednoconseclist(self.filename):
- raise ValueError('filename error: all entries for signals that share a given file must be consecutive')
- elif field == 'fmt':
- if f not in _signals.datformats:
- raise ValueError('File formats must be valid WFDB dat formats: '+' , '.join(_signals.datformats))
- elif field == 'sampsperframe':
- if f < 1:
- raise ValueError('sampsperframe values must be positive integers')
- elif field == 'skew':
- if f < 0:
- raise ValueError('skew values must be non-negative integers')
- elif field == 'byteoffset':
- if f < 0:
- raise ValueError('byteoffset values must be non-negative integers')
- elif field == 'adcgain':
- if f <= 0:
- raise ValueError('adcgain values must be positive numbers')
- elif field == 'baseline':
- # Currently original WFDB library only has 4 bytes for baseline.
- if f < -2147483648 or f> 2147483648:
- raise ValueError('baseline values must be between -2147483648 (-2^31) and 2147483647 (2^31 -1)')
- elif field == 'units':
- if re.search('\s', f):
- raise ValueError('units strings may not contain whitespaces.')
- elif field == 'adcres':
- if f < 0:
- raise ValueError('adcres values must be non-negative integers')
- # elif field == 'adczero': nothing to check here
- # elif field == 'initvalue': nothing to check here
- # elif field == 'checksum': nothing to check here
- elif field == 'blocksize':
- if f < 0:
- raise ValueError('blocksize values must be non-negative integers')
- elif field == 'signame':
- if re.search('\s', f):
- raise ValueError('signame strings may not contain whitespaces.')
- if len(set(self.signame)) != len(self.signame):
- raise ValueError('signame strings must be unique.')
-
- # Segment specification fields
- elif field == 'segname':
- # Segment names must be alphanumerics or just a single '~'
- for f in self.segname:
- if f == '~':
- continue
- acceptedstring = re.match('[-\w]+',f)
- if not acceptedstring or acceptedstring.string != f:
- raise ValueError("Non-null segment names may only contain alphanumerics and dashes. Null segment names must be set to '~'")
- elif field == 'seglen':
- # For records with more than 1 segment, the first segment may be
- # the layout specification segment with a length of 0
- if len(self.seglen)>1:
- if self.seglen[0] < 0:
- raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
- sl = self.seglen[1:]
- else:
- sl = self.seglen
- for f in sl:
- if f < 1:
- raise ValueError('seglen values must be positive integers. Only seglen[0] may be 0 to indicate a layout segment')
- # Comment field
- elif field == 'comments':
- for f in self.comments:
- if f=='': # Allow empty string comment lines
- continue
- if f[0] == '#':
- print("Note: comment strings do not need to begin with '#'. This library adds them automatically.")
- if re.search('[\t\n\r\f\v]', f):
- raise ValueError('comments may not contain tabs or newlines (they may contain spaces and underscores).')
- # Check the data type of the specified field.
- # ch is used for signal spec fields
- # Some fields are lists. This must be checked, along with their elements.
- def checkfieldtype(self, field, ch=None):
-
- item = getattr(self, field)
-
- # Record specification field. Nonlist.
- if field in _headers.recfieldspecs:
- checkitemtype(item, field, _headers.recfieldspecs[field].allowedtypes)
-
- # Signal specification field. List.
- elif field in _headers.sigfieldspecs:
- checkitemtype(item, field, _headers.sigfieldspecs[field].allowedtypes, ch)
-
- # Segment specification field. List. All elements cannot be None
- elif field in _headers.segfieldspecs:
- checkitemtype(item, field, _headers.segfieldspecs[field].allowedtypes, 'all')
-
- # Comments field. List. Elements cannot be None
- elif field == 'comments':
- checkitemtype(item, field, (str), 'all')
-
- # Signals field.
- elif field in ['p_signals','d_signals']:
- checkitemtype(item, field, (np.ndarray))
-
- elif field in ['e_p_signals', 'e_d_signals']:
- checkitemtype(item, field, (np.ndarray), 'all')
-
- # Segments field. List. Elements may be None.
- elif field == 'segments':
- checkitemtype(item, field, (Record), 'none')
-
- # Ensure that input read parameters are valid for the record
- def checkreadinputs(self, sampfrom, sampto, channels, physical, m2s, smoothframes, returnres):
- # Data Type Check
- if not hasattr(sampfrom, '__index__'):
- raise TypeError('sampfrom must be an integer')
- if not hasattr(sampto, '__index__'):
- raise TypeError('sampto must be an integer')
-
- if not isinstance(channels, list):
- raise TypeError('channels must be a list of integers')
-
- # Duration Ranges
- if sampfrom<0:
- raise ValueError('sampfrom must be a non-negative integer')
- if sampfrom>self.siglen:
- raise ValueError('sampfrom must be shorter than the signal length')
- if sampto<0:
- raise ValueError('sampto must be a non-negative integer')
- if sampto>self.siglen:
- raise ValueError('sampto must be shorter than the signal length')
- if sampto<=sampfrom:
- raise ValueError('sampto must be greater than sampfrom')
-
- # Channel Ranges
- for c in channels:
- if c<0:
- raise ValueError('Input channels must all be non-negative integers')
- if c>self.nsig-1:
- raise ValueError('Input channels must all be lower than the total number of channels')
-
- if returnres not in [64, 32, 16, 8]:
- raise ValueError("returnres must be one of the following: 64, 32, 16, 8")
- if physical is True and returnres == 8:
- raise ValueError("returnres must be one of the following when physical is True: 64, 32, 16")
-
- # Cannot expand multiple samples/frame for multi-segment records
- if isinstance(self, MultiRecord):
-
- # If m2s == True, Physical must be true. There is no
- # meaningful representation of digital signals transferred
- # from individual segments.
- if m2s is True and physical is not True:
- raise Exception('If m2s is True, physical must also be True.')
-
- if smoothframes is False:
- raise ValueError('This package version cannot expand all samples when reading multi-segment records. Must enable frame smoothing.')
-
-# Check the item type. Vary the print message regarding whether the item can be None.
-# Helper to checkfieldtype
-# channels is a list of booleans indicating whether the field's channel must be present (1) or may be None (0)
-# and is not just for signal specification fields
-def checkitemtype(item, field, allowedtypes, channels=None):
-
- # Checking the list
- if channels is not None:
-
- # First make sure the item is a list
- if not isinstance(item, list):
- raise TypeError("Field: '"+field+"' must be a list")
-
- # Expand to make sure all channels must have present field
- if channels == 'all':
- channels = [1]*len(item)
-
- # Expand to allow any channel to be None
- if channels == 'none':
- channels = [0]*len(item)
-
- for ch in range(0, len(channels)):
-
- mustexist=channels[ch]
- # The field must exist for the channel
- if mustexist:
- if not isinstance(item[ch], allowedtypes):
- raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be one of the following types:", allowedtypes)
-
- # The field may be None for the channel
- else:
- if not isinstance(item[ch], allowedtypes) and item[ch] is not None:
- raise TypeError("Channel "+str(ch)+" of field: '"+field+"' must be a 'None', or one of the following types:", allowedtypes)
-
- # Single scalar to check
- else:
- if not isinstance(item, allowedtypes):
- raise TypeError("Field: '"+field+"' must be one of the following types:", allowedtypes)
-
-
-
-class Record(BaseRecord, _headers.HeadersMixin, _signals.SignalsMixin):
- """
- The class representing WFDB headers, and single segment WFDB records.
-
- Record objects can be created using the constructor, by reading a WFDB header
- with 'rdheader', or a WFDB record (header and associated dat files) with rdsamp'
- or 'srdsamp'.
-
- The attributes of the Record object give information about the record as specified
- by https://www.physionet.org/physiotools/wag/header-5.htm
-
- In addition, the d_signals and p_signals attributes store the digital and physical
- signals of WFDB records with at least one channel.
-
- Constructor function:
- def __init__(self, p_signals=None, d_signals=None,
- recordname=None, nsig=None,
- fs=None, counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- filename=None, fmt=None, sampsperframe=None,
- skew=None, byteoffset=None, adcgain=None,
- baseline=None, units=None, adcres=None,
- adczero=None, initvalue=None, checksum=None,
- blocksize=None, signame=None, comments=None)
-
- Example Usage:
- import wfdb
- record = wfdb.Record(recordname='r1', fs=250, nsig=2, siglen=1000, filename=['r1.dat','r1.dat'])
-
- """
- # Constructor
- def __init__(self, p_signals=None, d_signals=None,
- e_p_signals=None, e_d_signals=None,
- recordname=None, nsig=None,
- fs=None, counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- filename=None, fmt=None, sampsperframe=None,
- skew=None, byteoffset=None, adcgain=None,
- baseline=None, units=None, adcres=None,
- adczero=None, initvalue=None, checksum=None,
- blocksize=None, signame=None, comments=None):
-
- # Note the lack of 'nseg' field. Single segment records cannot have this field. Even nseg = 1 makes
- # the header a multi-segment header.
-
- super(Record, self).__init__(recordname, nsig,
- fs, counterfreq, basecounter, siglen,
- basetime, basedate, comments, signame)
-
- self.p_signals = p_signals
- self.d_signals = d_signals
- self.e_p_signals = e_p_signals
- self.e_d_signals = e_d_signals
-
-
- self.filename=filename
- self.fmt=fmt
- self.sampsperframe=sampsperframe
- self.skew=skew
- self.byteoffset=byteoffset
- self.adcgain=adcgain
- self.baseline=baseline
- self.units=units
- self.adcres=adcres
- self.adczero=adczero
- self.initvalue=initvalue
- self.checksum=checksum
- self.blocksize=blocksize
-
- # Equal comparison operator for objects of this type
- def __eq__(self, other):
- att1 = self.__dict__
- att2 = other.__dict__
-
- if set(att1.keys()) != set(att2.keys()):
- return False
-
- for k in att1.keys():
-
- v1 = att1[k]
- v2 = att2[k]
-
- if type(v1) != type(v2):
- return False
-
- if type(v1) == np.ndarray:
- if not np.array_equal(v1, v2):
- return False
- else:
- if v1 != v2:
- return False
-
- return True
-
- # Write a wfdb header file and associated dat files if any.
- # Uses d_signals (expanded=False) or e_d_signals to write the samples
- def wrsamp(self, expanded=False):
-
- # Perform field validity and cohesion checks, and write the header file.
- self.wrheader()
- if self.nsig>0:
- # Perform signal validity and cohesion checks, and write the associated dat files.
- self.wrdats(expanded)
-
-
- # Arrange/edit object fields to reflect user channel and/or signal range input
- # Account for case when signals are expanded
- def arrangefields(self, channels, expanded=False):
-
- # Rearrange signal specification fields
- for field in _headers.sigfieldspecs:
- item = getattr(self, field)
- setattr(self, field, [item[c] for c in channels])
-
- # Expanded signals - multiple samples per frame.
- if expanded:
- # Checksum and initvalue to be updated if present
- # unless the whole signal length was input
- if self.siglen != int(len(self.e_d_signals[0])/self.sampsperframe[0]):
- self.checksum = self.calc_checksum(expanded)
- self.initvalue = [s[0] for s in self.e_d_signals]
-
- self.nsig = len(channels)
- self.siglen = int(len(self.e_d_signals[0])/self.sampsperframe[0])
-
- # MxN numpy array d_signals
- else:
- # Checksum and initvalue to be updated if present
- # unless the whole signal length was input
- if self.siglen != self.d_signals.shape[0]:
-
- if self.checksum is not None:
- self.checksum = self.calc_checksum()
- if self.initvalue is not None:
- ival = list(self.d_signals[0, :])
- self.initvalue = [int(i) for i in ival]
-
- # Update record specification parameters
- # Important that these get updated after^^
- self.nsig = len(channels)
- self.siglen = self.d_signals.shape[0]
-
-
-
-# Class for multi segment WFDB records.
-class MultiRecord(BaseRecord, _headers.MultiHeadersMixin):
- """
- The class representing multi-segment WFDB records.
-
- MultiRecord objects can be created using the constructor, or by reading a multi-segment
- WFDB record using 'rdsamp' with the 'm2s' (multi to single) input parameter set to False.
-
- The attributes of the MultiRecord object give information about the entire record as specified
- by https://www.physionet.org/physiotools/wag/header-5.htm
-
- In addition, the 'segments' parameter is a list of Record objects representing each
- individual segment, or 'None' representing empty segments, of the entire multi-segment record.
-
- Notably, this class has no attribute representing the signals as a whole. The 'multi_to_single'
- instance method can be called on MultiRecord objects to return a single segment representation
- of the record as a Record object. The resulting Record object will have its 'p_signals' field set.
-
- Constructor function:
- def __init__(self, segments=None, layout=None,
- recordname=None, nsig=None, fs=None,
- counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- segname=None, seglen=None, comments=None,
- signame=None, sigsegments=None)
-
- Example Usage:
- import wfdb
- recordM = wfdb.MultiRecord(recordname='rm', fs=50, nsig=8, siglen=9999,
- segname=['rm_1', '~', 'rm_2'], seglen=[800, 200, 900])
-
- recordL = wfdb.rdsamp('s00001-2896-10-10-00-31', m2s = False)
- recordL = recordL.multi_to_single()
- """
-
- # Constructor
- def __init__(self, segments=None, layout=None,
- recordname=None, nsig=None, fs=None,
- counterfreq=None, basecounter=None,
- siglen=None, basetime=None, basedate=None,
- segname=None, seglen=None, comments=None,
- signame=None, sigsegments=None):
-
-
- super(MultiRecord, self).__init__(recordname, nsig,
- fs, counterfreq, basecounter, siglen,
- basetime, basedate, comments, signame)
-
- self.layout = layout
- self.segments = segments
- self.segname = segname
- self.seglen = seglen
- self.sigsegments=sigsegments
-
- # Write a multi-segment header, along with headers and dat files for all segments
- def wrsamp(self):
- # Perform field validity and cohesion checks, and write the header file.
- self.wrheader()
- # Perform record validity and cohesion checks, and write the associated segments.
- for seg in self.segments:
- seg.wrsamp()
-
-
- # Check the cohesion of the segments field with other fields used to write the record
- def checksegmentcohesion(self):
-
- # Check that nseg is equal to the length of the segments field
- if self.nseg != len(self.segments):
- raise ValueError("Length of segments must match the 'nseg' field")
-
- for i in range(0, nseg):
- s = self.segments[i]
-
- # If segment 0 is a layout specification record, check that its file names are all == '~''
- if i==0 and self.seglen[0] == 0:
- for filename in s.filename:
- if filename != '~':
- raise ValueError("Layout specification records must have all filenames named '~'")
-
- # Check that sampling frequencies all match the one in the master header
- if s.fs != self.fs:
- raise ValueError("The 'fs' in each segment must match the overall record's 'fs'")
-
- # Check the signal length of the segment against the corresponding seglen field
- if s.siglen != self.seglen[i]:
- raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length')
-
- totalsiglen = totalsiglen + getattr(s, 'siglen')
-
- # No need to check the sum of siglens from each segment object against siglen
- # Already effectively done it when checking sum(seglen) against siglen
-
-
- # Determine the segments and the samples
- # within each segment that have to be read in a
- # multi-segment record. Called during rdsamp.
- def requiredsegments(self, sampfrom, sampto, channels):
-
- # The starting segment with actual samples
- if self.layout == 'Fixed':
- startseg = 0
- else:
- startseg = 1
-
- # Cumulative sum of segment lengths (ignoring layout segment)
- cumsumlengths = list(np.cumsum(self.seglen[startseg:]))
- # Get first segment
- readsegs = [[sampfrom < cs for cs in cumsumlengths].index(True)]
- # Get final segment
- if sampto == cumsumlengths[len(cumsumlengths) - 1]:
- readsegs.append(len(cumsumlengths) - 1)
- else:
- readsegs.append([sampto <= cs for cs in cumsumlengths].index(True))
-
- # Add 1 for variable layout records
- readsegs = list(np.add(readsegs,startseg))
-
- # Obtain the sampfrom and sampto to read for each segment
- if readsegs[1] == readsegs[0]:
- # Only one segment to read
- readsegs = [readsegs[0]]
- # The segment's first sample number relative to the entire record
- segstartsamp = sum(self.seglen[0:readsegs[0]])
- readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]
-
- else:
- # More than one segment to read
- readsegs = list(range(readsegs[0], readsegs[1]+1))
- readsamps = [[0, self.seglen[s]] for s in readsegs]
-
- # Starting sample for first segment.
- readsamps[0][0] = sampfrom - ([0] + cumsumlengths)[readsegs[0]-startseg]
-
- # End sample for last segment
- readsamps[-1][1] = sampto - ([0] + cumsumlengths)[readsegs[-1]-startseg]
-
- return (readsegs, readsamps)
-
- # Get the channel numbers to be read from each segment
- def requiredsignals(self, readsegs, channels, dirname, pbdir):
-
- # Fixed layout. All channels are the same.
- if self.layout == 'Fixed':
- # Should we bother here with skipping empty segments?
- # They won't be read anyway.
- readsigs = [channels]*len(readsegs)
- # Variable layout: figure out channels by matching record names
- else:
- readsigs = []
- # The overall layout signal names
- l_signames = self.segments[0].signame
- # The wanted signals
- w_signames = [l_signames[c] for c in channels]
-
- # For each segment ...
- for i in range(0, len(readsegs)):
- # Skip empty segments
- if self.segname[readsegs[i]] == '~':
- readsigs.append(None)
- else:
- # Get the signal names of the current segment
- s_signames = rdheader(os.path.join(dirname, self.segname[readsegs[i]]), pbdir = pbdir).signame
- readsigs.append(wanted_siginds(w_signames, s_signames))
-
- return readsigs
-
- # Arrange/edit object fields to reflect user channel and/or signal range input
- def arrangefields(self, readsegs, segranges, channels):
-
- # Update seglen values for relevant segments
- for i in range(0, len(readsegs)):
- self.seglen[readsegs[i]] = segranges[i][1] - segranges[i][0]
-
- # Update record specification parameters
- self.nsig = len(channels)
- self.siglen = sum([sr[1]-sr[0] for sr in segranges])
-
- # Get rid of the segments and segment line parameters
- # outside the desired segment range
- if self.layout == 'Fixed':
- self.segments = self.segments[readsegs[0]:readsegs[-1]+1]
- self.segname = self.segname[readsegs[0]:readsegs[-1]+1]
- self.seglen = self.seglen[readsegs[0]:readsegs[-1]+1]
- else:
- # Keep the layout specifier segment
- self.segments = [self.segments[0]] + self.segments[readsegs[0]:readsegs[-1]+1]
- self.segname = [self.segname[0]] + self.segname[readsegs[0]:readsegs[-1]+1]
- self.seglen = [self.seglen[0]] + self.seglen[readsegs[0]:readsegs[-1]+1]
-
- # Update number of segments
- self.nseg = len(self.segments)
-
- # Convert a MultiRecord object to a Record object
- def multi_to_single(self, returnres):
-
- # The fields to transfer to the new object
- fields = self.__dict__.copy()
-
- # Remove multirecord fields
- del(fields['segments'])
- del(fields['segname'])
- del(fields['seglen'])
- del(fields['nseg'])
-
- # The output physical signals
- if returnres == 64:
- floatdtype = 'float64'
- elif returnres == 32:
- floatdtype = 'float32'
- else:
- floatdtype = 'float16'
-
-
- p_signals = np.zeros([self.siglen, self.nsig], dtype=floatdtype)
-
- # Get the physical samples from each segment
-
- # Start and end samples in the overall array
- # to place the segment samples into
- startsamps = [0] + list(np.cumsum(self.seglen)[0:-1])
- endsamps = list(np.cumsum(self.seglen))
-
- if self.layout == 'Fixed':
- # Get the signal names and units from the first segment
- fields['signame'] = self.segments[0].signame
- fields['units'] = self.segments[0].units
-
- for i in range(self.nseg):
- p_signals[startsamps[i]:endsamps[i],:] = self.segments[i].p_signals
- # For variable layout, have to get channels by name
- else:
- # Get the signal names from the layout segment
- fields['signame'] = self.segments[0].signame
- fields['units'] = self.segments[0].units
-
- for i in range(1, self.nseg):
- seg = self.segments[i]
-
- # Empty segment
- if seg is None:
- p_signals[startsamps[i]:endsamps[i],:] = np.nan
- # Non-empty segment
- else:
- # Figure out if there are any channels wanted and
- # the output channels they are to be stored in
- inchannels = []
- outchannels = []
- for s in fields['signame']:
- if s in seg.signame:
- inchannels.append(seg.signame.index(s))
- outchannels.append(fields['signame'].index(s))
-
- # Segment contains no wanted channels. Fill with nans.
- if inchannels == []:
- p_signals[startsamps[i]:endsamps[i],:] = np.nan
- # Segment contains wanted channel(s). Transfer samples.
- else:
- # This statement is necessary in case this function is not called
- # directly from rdsamp with m2s=True.
- if not hasattr(seg, 'p_signals'):
- seg.p_signals = seg.dac(returnres=returnres)
- for ch in range(0, fields['nsig']):
- if ch not in outchannels:
- p_signals[startsamps[i]:endsamps[i],ch] = np.nan
- else:
- p_signals[startsamps[i]:endsamps[i],ch] = seg.p_signals[:, inchannels[outchannels.index(ch)]]
-
- # Create the single segment Record object and set attributes
- record = Record()
- for field in fields:
- setattr(record, field, fields[field])
- record.p_signals = p_signals
-
- return record
-
-
-#------------------- Reading Records -------------------#
-
-# Read a WFDB single or multi segment record. Return a Record or MultiRecord object
-def rdsamp(recordname, sampfrom=0, sampto=None, channels = None, physical = True, pbdir = None,
- m2s = True, smoothframes = True, ignoreskew=False, returnres=64):
- """Read a WFDB record and return the signal and record descriptors as attributes in a
- Record or MultiRecord object.
-
- Usage:
- record = rdsamp(recordname, sampfrom=0, sampto=None, channels=None, physical=True, pbdir = None,
- m2s=True, smoothframes = True, ignoreskew=False)
-
- Input arguments:
- - recordname (required): The name of the WFDB record to be read (without any file extensions).
- If the argument contains any path delimiter characters, the argument will be interpreted as
- PATH/baserecord and the data files will be searched for in the local path.
- - sampfrom (default=0): The starting sample number to read for each channel.
- - sampto (default=None): The sample number at which to stop reading for each channel.
- - channels (default=all): Indices specifying the channel to be returned.
- - physical (default=True): Flag that specifies whether to return signals in physical units in
- the p_signals field (True), or digital units in the d_signals field (False).
- - pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
- directory from which to find the required record files.
- eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- - m2s (default=True): Flag used when reading multi-segment records. Specifies whether to
- directly return a wfdb MultiRecord object (False), or to convert it into and return a wfdb
- Record object (True).
- - smoothframes (default=True): Flag used when reading records with signals having multiple
- samples per frame. Specifies whether to smooth the samples in signals with more than
- one sample per frame and return an mxn uniform numpy array as the d_signals or p_signals
- field (True), or to return a list of 1d numpy arrays containing every expanded sample as
- the e_d_signals or e_p_signals field (False).
- - ignoreskew (default=False): Flag used when reading records with at least one skewed signal.
- Specifies whether to apply the skew to align the signals in the output variable (False), or
- to ignore the skew field and load in all values contained in the dat files unaligned (True).
- - returnres (default=64): The numpy array dtype of the returned signals. Options are: 64, 32,
- 16, and 8, where the value represents the numpy int or float dtype. Note that the value
- cannot be 8 when physical is True since there is no float8 format.
-
- Output argument:
- - record: The wfdb Record or MultiRecord object representing the contents of the record read.
-
- Note: If a signal range or channel selection is specified when calling this function, the
- the resulting attributes of the returned object will be set to reflect the section
- of the record that is actually read, rather than necessarily what is in the header file.
- For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
- 'nsig' attribute will be 3, not 12.
-
- Note: The 'srdsamp' function exists as a simple alternative to 'rdsamp' for the most common
- purpose of extracting the physical signals and a few important descriptor fields.
- 'srdsamp' returns two arguments: the physical signals array, and a dictionary of a
- few select fields, a subset of the original wfdb Record attributes.
-
- Example Usage:
- import wfdb
- ecgrecord = wfdb.rdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
- """
-
- dirname, baserecordname = os.path.split(recordname)
-
- # Read the header fields into the appropriate record object
- record = rdheader(recordname, pbdir = pbdir, rdsegments = False)
-
- # Set defaults for sampto and channels input variables
- if sampto is None:
- sampto = record.siglen
- if channels is None:
- channels = list(range(record.nsig))
-
- # Ensure that input fields are valid for the record
- record.checkreadinputs(sampfrom, sampto, channels, physical, m2s, smoothframes, returnres)
-
- # A single segment record
- if isinstance(record, Record):
-
- # Only 1 sample/frame, or frames are smoothed. Return uniform numpy array
- if smoothframes or max([record.sampsperframe[c] for c in channels])==1:
- # Read signals from the associated dat files that contain wanted channels
- record.d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
- record.byteoffset, record.sampsperframe, record.skew,
- sampfrom, sampto, channels, smoothframes, ignoreskew)
-
- # Arrange/edit the object fields to reflect user channel and/or signal range input
- record.arrangefields(channels, expanded=False)
-
- if physical is True:
- # Perform inplace dac to get physical signal
- record.dac(expanded=False, returnres=returnres, inplace=True)
-
- # Return each sample of the signals with multiple samples per frame
- else:
- record.e_d_signals = _signals.rdsegment(record.filename, dirname, pbdir, record.nsig, record.fmt, record.siglen,
- record.byteoffset, record.sampsperframe, record.skew,
- sampfrom, sampto, channels, smoothframes, ignoreskew)
-
- # Arrange/edit the object fields to reflect user channel and/or signal range input
- record.arrangefields(channels, expanded=True)
-
- if physical is True:
- # Perform dac to get physical signal
- record.dac(expanded=True, returnres=returnres, inplace=True)
-
-
- # A multi segment record
-
- # We can make another rdsamp function (called rdsamp_segment) to call
- # for individual segments to deal with the skews.
- else:
- # Strategy:
- # 1. Read the required segments and store them in
- # Record objects.
- # 2. Update the parameters of the objects to reflect
- # the state of the sections read.
- # 3. Update the parameters of the overall MultiRecord
- # object to reflect the state of the individual segments.
- # 4. If specified, convert the MultiRecord object
- # into a single Record object.
-
- # Segments field is a list of Record objects
- # Empty segments store None.
-
- record.segments = [None]*record.nseg
-
- # Variable layout
- if record.seglen[0] == 0:
- record.layout = 'Variable'
- # Read the layout specification header
- record.segments[0] = rdheader(os.path.join(dirname, record.segname[0]), pbdir=pbdir)
- # Fixed layout
- else:
- record.layout = 'Fixed'
-
- # The segment numbers and samples within each segment to read.
- readsegs, segranges = record.requiredsegments(sampfrom, sampto, channels)
- # The signals within each segment to read
- segsigs = record.requiredsignals(readsegs, channels, dirname, pbdir)
-
- # Read the desired samples in the relevant segments
- for i in range(len(readsegs)):
- segnum = readsegs[i]
- # Empty segment or segment with no relevant channels
- if record.segname[segnum] == '~' or segsigs[i] is None:
- record.segments[segnum] = None
- else:
- record.segments[segnum] = rdsamp(os.path.join(dirname, record.segname[segnum]),
- sampfrom = segranges[i][0], sampto = segranges[i][1],
- channels = segsigs[i], physical = True, pbdir=pbdir)
-
- # Arrange the fields of the overall object to reflect user input
- record.arrangefields(readsegs, segranges, channels)
-
- # Convert object into a single segment Record object
- if m2s:
- record = record.multi_to_single(returnres=returnres)
-
- # Perform dtype conversion if necessary
- if isinstance(record, Record) and record.nsig>0:
- record.convert_dtype(physical, returnres, smoothframes)
-
- return record
-
-
-# Read a WFDB header. Return a Record object or MultiRecord object
-def rdheader(recordname, pbdir = None, rdsegments = False):
- """Read a WFDB header file and return the record descriptors as attributes in a Record object
-
- Usage:
- record = rdheader(recordname, pbdir = None, rdsegments = False)
-
- Input arguments:
- - recordname (required): The name of the WFDB record to be read (without any file extensions).
- If the argument contains any path delimiter characters, the argument will be interpreted as
- PATH/baserecord and the header file will be searched for in the local path.
- - pbdir (default=None): Option used to stream data from Physiobank. The Physiobank database
- directory from which to find the required record files.
- eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb', pbdir = 'mitdb'.
- - rdsegments (default=False): Boolean flag used when reading multi-segment headers. If True,
- segment headers will also be read (into the record object's 'segments' field).
-
- Output argument:
- - record: The wfdb Record or MultiRecord object representing the contents of the header read.
-
- Example Usage:
- import wfdb
- ecgrecord = wfdb.rdheader('sampledata/test01_00s', sampfrom=800, channels = [1,3])
- """
-
- # Read the header file. Separate comment and non-comment lines
- headerlines, commentlines = _headers.getheaderlines(recordname, pbdir)
-
- # Get fields from record line
- d_rec = _headers.read_rec_line(headerlines[0])
-
- # Processing according to whether the header is single or multi segment
-
- # Single segment header - Process signal specification lines
- if d_rec['nseg'] is None:
- # Create a single-segment WFDB record object
- record = Record()
-
- # There is at least one channel
- if len(headerlines)>1:
- # Read the fields from the signal lines
- d_sig = _headers.read_sig_lines(headerlines[1:])
- # Set the object's signal line fields
- for field in _headers.sigfieldspecs:
- setattr(record, field, d_sig[field])
-
- # Set the object's record line fields
- for field in _headers.recfieldspecs:
- if field == 'nseg':
- continue
- setattr(record, field, d_rec[field])
- # Multi segment header - Process segment specification lines
- else:
- # Create a multi-segment WFDB record object
- record = MultiRecord()
- # Read the fields from the segment lines
- d_seg = _headers.read_seg_lines(headerlines[1:])
- # Set the object's segment line fields
- for field in _headers.segfieldspecs:
- setattr(record, field, d_seg[field])
- # Set the objects' record line fields
- for field in _headers.recfieldspecs:
- setattr(record, field, d_rec[field])
- # Determine whether the record is fixed or variable
- if record.seglen[0] == 0:
- record.layout = 'Variable'
- else:
- record.layout = 'Fixed'
-
- # If specified, read the segment headers
- if rdsegments:
- record.segments = []
- # Get the base record name (could be empty)
- dirname = os.path.split(recordname)[0]
- for s in record.segname:
- if s == '~':
- record.segments.append(None)
- else:
- record.segments.append(rdheader(os.path.join(dirname,s), pbdir))
- # Fill in the signame attribute
- record.signame = record.getsignames()
- # Fill in the sigsegments attribute
- record.sigsegments = record.getsigsegments()
-
- # Set the comments field
- record.comments = []
- for line in commentlines:
- record.comments.append(line.strip(' \t#'))
-
- return record
-
-
-# Given some wanted signal names, and the signal names contained
-# in a record, return the indices of the record channels that intersect.
-# Remember that the wanted signal names are already in order specified in user input channels. So it's good!
-def wanted_siginds(wanted_signames, record_signames):
- contained_signals = [s for s in wanted_signames if s in record_signames]
- if contained_signals == []:
- return None
- else:
- return [record_signames.index(s) for s in contained_signals]
-
-
-# A simple version of rdsamp for ease of use
-# Return the physical signals and a few essential fields
-def srdsamp(recordname, sampfrom=0, sampto=None, channels = None, pbdir = None):
- """Read a WFDB record and return the physical signal and a few important descriptor fields
-
- Usage:
- signals, fields = srdsamp(recordname, sampfrom=0, sampto=None, channels=None, pbdir=None)
-
- Input arguments:
- - recordname (required): The name of the WFDB record to be read (without any file extensions).
- If the argument contains any path delimiter characters, the argument will be interpreted as
- PATH/baserecord and the data files will be searched for in the local path.
- - sampfrom (default=0): The starting sample number to read for each channel.
- - sampto (default=None): The sample number at which to stop reading for each channel.
- - channels (default=all): Indices specifying the channel to be returned.
-
- Output arguments:
- - signals: A 2d numpy array storing the physical signals from the record.
- - fields: A dictionary specifying several key attributes of the read record:
- - fs: The sampling frequency of the record
- - units: The units for each channel
- - signame: The signal name for each channel
- - comments: Any comments written in the header
-
- Note: If a signal range or channel selection is specified when calling this function, the
- the resulting attributes of the returned object will be set to reflect the section
- of the record that is actually read, rather than necessarily what is in the header file.
- For example, if channels = [0, 1, 2] is specified when reading a 12 channel record, the
- 'nsig' attribute will be 3, not 12.
-
- Note: The 'rdsamp' function is the base function upon which this one is built. It returns
- all attributes present, along with the signals, as attributes in a wfdb.Record object.
- The function, along with the returned data type, have more options than 'srdsamp' for
- users who wish to more directly manipulate WFDB files.
-
- Example Usage:
- import wfdb
- sig, fields = wfdb.srdsamp('sampledata/test01_00s', sampfrom=800, channels = [1,3])
- """
-
- record = rdsamp(recordname, sampfrom, sampto, channels, True, pbdir, True)
-
- signals = record.p_signals
- fields = {}
- for field in ['fs','units','signame', 'comments']:
- fields[field] = getattr(record, field)
-
- return signals, fields
-
-#------------------- /Reading Records -------------------#
-
-
-# Function for writing single segment records
-def wrsamp(recordname, fs, units, signames, p_signals=None, d_signals=None,
- fmt=None, gain=None, baseline=None, comments=None, basetime=None,
- basedate=None):
- """Write a single segment WFDB record, creating a WFDB header file and any associated dat files.
-
- Usage:
- wrsamp(recordname, fs, units, signames, p_signals = None, d_signals=None,
- fmt = None, gain = None, baseline = None, comments = None)
-
- Input arguments:
- - recordname (required): The string name of the WFDB record to be written (without any file extensions).
- - fs (required): The numerical sampling frequency of the record.
- - units (required): A list of strings giving the units of each signal channel.
- - signames (required): A list of strings giving the signal name of each signal channel.
- - p_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the physical signal
- values intended to be written. Either p_signals or d_signals must be set, but not both. If p_signals
- is set, this method will use it to perform analogue-digital conversion, writing the resultant digital
- values to the dat file(s). If fmt is set, gain and baseline must be set or unset together. If fmt is
- unset, gain and baseline must both be unset.
- - d_signals (default=None): An MxN 2d numpy array, where M is the signal length. Gives the digital signal
- values intended to be directly written to the dat file(s). The dtype must be an integer type. Either
- p_signals or d_signals must be set, but not both. In addition, if d_signals is set, fmt, gain and baseline
- must also all be set.
- - fmt (default=None): A list of strings giving the WFDB format of each file used to store each channel.
- Accepted formats are: "80","212","16","24", and "32". There are other WFDB formats but this library
- will not write (though it will read) those file types.
- - gain (default=None): A list of integers specifying the ADC gain.
- - baseline (default=None): A list of integers specifying the digital baseline.
- - comments (default=None): A list of string comments to be written to the header file.
- - basetime (default=None): A string of the record's start time in 24h HH:MM:SS(.ms) format.
- - basedate (default=None): A string of the record's start date in DD/MM/YYYY format.
-
- Note: This gateway function was written to enable a simple way to write WFDB record files using
- the most frequently used parameters. Therefore not all WFDB fields can be set via this function.
-
- For more control over attributes, create a wfdb.Record object, manually set its attributes, and
- call its wrsamp() instance method. If you choose this more advanced method, see also the setdefaults,
- set_d_features, and set_p_features instance methods to help populate attributes.
-
- Example Usage (with the most common scenario of input parameters):
- import wfdb
- # Read part of a record from Physiobank
- sig, fields = wfdb.srdsamp('a103l', sampfrom = 50000, channels = [0,1], pbdir = 'challenge/2015/training')
- # Write a local WFDB record (manually inserting fields)
- wfdb.wrsamp('ecgrecord', fs = 250, units = ['mV', 'mV'], signames = ['I', 'II'], p_signals = sig, fmt = ['16', '16'])
- """
-
- # Check input field combinations
- if p_signals is not None and d_signals is not None:
- raise Exception('Must only give one of the inputs: p_signals or d_signals')
- if d_signals is not None:
- if fmt is None or gain is None or baseline is None:
- raise Exception("When using d_signals, must also specify 'fmt', 'gain', and 'baseline' fields.")
- # Depending on whether d_signals or p_signals was used, set other required features.
- if p_signals is not None:
- # Create the Record object
- record = Record(recordname=recordname, p_signals=p_signals, fs=fs,
- fmt=fmt, units=units, signame=signames, adcgain = gain,
- baseline=baseline, comments=comments, basetime=basetime,
- basedate=basedate)
- # Compute optimal fields to store the digital signal, carry out adc, and set the fields.
- record.set_d_features(do_adc = 1)
- else:
- # Create the Record object
- record = Record(recordname=recordname, d_signals=d_signals, fs=fs,
- fmt=fmt, units=units, signame = signames, adcgain = gain,
- baseline=baseline, comments=comments, basetime=basetime,
- basedate=basedate)
- # Use d_signals to set the fields directly
- record.set_d_features()
-
- # Set default values of any missing field dependencies
- record.setdefaults()
- # Write the record files - header and associated dat
- record.wrsamp()
-
-
-# Time string parser for WFDB header - H(H):M(M):S(S(.sss)) format.
-def parsetimestring(timestring):
- times = re.findall("(?P\d{1,2}):(?P\d{1,2}):(?P\d{1,2}[.\d+]*)", timestring)
-
- if not times:
- raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
- else:
- hours, minutes, seconds = times[0]
-
- if not hours or not minutes or not seconds:
- raise ValueError("Invalid time string: "+timestring+". Acceptable format is: 'Hours:Minutes:Seconds'")
-
- hours = int(hours)
- minutes = int(minutes)
- seconds = float(seconds)
-
- if int(hours) >23:
- raise ValueError('hours must be < 24')
- elif hours<0:
- raise ValueError('hours must be positive')
- if minutes>59:
- raise ValueError('minutes must be < 60')
- elif minutes<0:
- raise ValueError('minutes must be positive')
- if seconds>59:
- raise ValueError('seconds must be < 60')
- elif seconds<0:
- raise ValueError('seconds must be positive')
-
- return (hours, minutes, seconds)
-
-# Date string parser for WFDB header - DD/MM/YYYY
-def parsedatestring(datestring):
- dates = re.findall(r"(?P\d{2})/(?P\d{2})/(?P\d{4})", datestring)
-
- if not dates:
- raise ValueError("Invalid date string. Acceptable format is: 'DD/MM/YYYY'")
- else:
- day, month, year = dates[0]
-
- day = int(day)
- month = int(month)
- year = int(year)
-
- if year<1:
- raise ValueError('year must be positive')
- if month<1 or month>12:
- raise ValueError('month must be between 1 and 12')
- if day not in range(1, monthrange(year, month)[1]+1):
- raise ValueError('day does not exist for specified year and month')
-
- return (day, month, year)
-
-# Returns the unique elements in a list in the order that they appear.
-# Also returns the indices of the original list that correspond to each output element.
-def orderedsetlist(fulllist):
- uniquelist = []
- original_inds = {}
-
- for i in range(0, len(fulllist)):
- item = fulllist[i]
- # new item
- if item not in uniquelist:
- uniquelist.append(item)
- original_inds[item] = [i]
- # previously seen item
- else:
- original_inds[item].append(i)
- return uniquelist, original_inds
-
-# Returns elements in a list without consecutive repeated values.
-def orderednoconseclist(fulllist):
- noconseclist = [fulllist[0]]
- if len(fulllist) == 1:
- return noconseclist
- for i in fulllist:
- if i!= noconseclist[-1]:
- noconseclist.append(i)
- return noconseclist
-
-
-
-
-# *These downloading files gateway function rely on the Record/MultiRecord objects.
-# They are placed here rather than in downloads.py in order to avoid circular imports
-
-
-# Download WFDB files from a physiobank database
-# This function only targets databases with WFDB records (EDF and MIT format).
-# If the database doesn't have a 'RECORDS" file, it will fail.
-def dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False):
- """Download WFDB record (and optionally annotation) files from a Physiobank database. The database
- must contain a 'RECORDS' file in its base directory which lists its WFDB records.
-
- Usage:
- dldatabase(pbdb, dlbasedir, records = 'all', annotators = 'all' , keepsubdirs = True, overwrite = False)
-
- Input arguments:
- - pbdb (required): The Physiobank database directory to download.
- eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- - dlbasedir (required): The full local directory path in which to download the files.
- - records (default='all'): Specifier of the WFDB records to download. Is either a list of strings
- which each specify a record, or 'all' to download all records listed in the database's RECORDS file.
- eg. records = ['test01_00s', test02_45s] for database https://physionet.org/physiobank/database/macecgdb/
- - annotators (default='all'): Specifier of the WFDB annotation file types to download along with
- the record files. Is either None to skip downloading any annotations, 'all' to download all
- annotation types as specified by the ANNOTATORS file, or a list of strings which each specify an
- annotation extension.
- eg. annotators = ['anI'] for database https://physionet.org/physiobank/database/prcp/
- - keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
- as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- - overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
- existing files with the same name and relative subdirectory will be checked. If the local file is
- the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
- and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
- partially downloaded and the remaining bytes will be downloaded and appended.
-
- Example Usage:
- import wfdb
- wfdb.dldatabase('ahadb', os.getcwd())
- """
-
- # Full url physiobank database
- dburl = posixpath.join(downloads.dbindexurl, pbdb)
- # Check if the database is valid
- r = requests.get(dburl)
- r.raise_for_status()
-
-
- # Get the list of records
- recordlist = downloads.getrecordlist(dburl, records)
- # Get the annotator extensions
- annotators = downloads.getannotators(dburl, annotators)
-
- # All files to download (relative to the database's home directory)
- allfiles = []
-
- for rec in recordlist:
- # Check out whether each record is in MIT or EDF format
- if rec.endswith('.edf'):
- allfiles.append(rec)
-
- else:
- # If MIT format, have to figure out all associated files
- allfiles.append(rec+'.hea')
- dirname, baserecname = os.path.split(rec)
- record = rdheader(baserecname, pbdir = posixpath.join(pbdb, dirname))
-
- # Single segment record
- if isinstance(record, Record):
- # Add all dat files of the segment
- for file in record.filename:
- allfiles.append(posixpath.join(dirname, file))
-
- # Multi segment record
- else:
- for seg in record.segname:
- # Skip empty segments
- if seg == '~':
- continue
- # Add the header
- allfiles.append(posixpath.join(dirname, seg+'.hea'))
- # Layout specifier has no dat files
- if seg.endswith('_layout'):
- continue
- # Add all dat files of the segment
- recseg = rdheader(seg, pbdir = posixpath.join(pbdb, dirname))
- for file in recseg.filename:
- allfiles.append(posixpath.join(dirname, file))
- # check whether the record has any requested annotation files
- if annotators is not None:
- for a in annotators:
- annfile = rec+'.'+a
- url = posixpath.join(downloads.dbindexurl, pbdb, annfile)
- rh = requests.head(url)
-
- if rh.status_code != 404:
- allfiles.append(annfile)
-
- dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in allfiles]
-
- # Make any required local directories
- downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
-
- print('Downloading files...')
- # Create multiple processes to download files.
- # Limit to 2 connections to avoid overloading the server
- pool = multiprocessing.Pool(processes=2)
- pool.map(downloads.dlpbfile, dlinputs)
- print('Finished downloading files')
-
- return
-
-# Download specific files from a physiobank database
-def dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False):
- """Download specified files from a Physiobank database.
-
- Usage:
- dldatabasefiles(pbdb, dlbasedir, files, keepsubdirs = True, overwrite = False):
-
- Input arguments:
- - pbdb (required): The Physiobank database directory to download.
- eg. For database 'http://physionet.org/physiobank/database/mitdb', pbdb = 'mitdb'.
- - dlbasedir (required): The full local directory path in which to download the files.
- - files (required): A list of strings specifying the file names to download relative to the database
- base directory
- - keepsubdirs (default=True): Whether to keep the relative subdirectories of downloaded files
- as they are organized in Physiobank (True), or to download all files into the same base directory (False).
- - overwrite (default=False): If set to True, all files will be redownloaded regardless. If set to False,
- existing files with the same name and relative subdirectory will be checked. If the local file is
- the same size as the online file, the download is skipped. If the local file is larger, it will be deleted
- and the file will be redownloaded. If the local file is smaller, the file will be assumed to be
- partially downloaded and the remaining bytes will be downloaded and appended.
-
- Example Usage:
- import wfdb
- wfdb.dldatabasefiles('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
- """
-
- # Full url physiobank database
- dburl = posixpath.join(downloads.dbindexurl, pbdb)
- # Check if the database is valid
- r = requests.get(dburl)
- r.raise_for_status()
-
- # Construct the urls to download
- dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], pbdb, dlbasedir, keepsubdirs, overwrite) for file in files]
-
- # Make any required local directories
- downloads.makelocaldirs(dlbasedir, dlinputs, keepsubdirs)
-
- print('Downloading files...')
- # Create multiple processes to download files.
- # Limit to 2 connections to avoid overloading the server
- pool = multiprocessing.Pool(processes=2)
- pool.map(downloads.dlpbfile, dlinputs)
- print('Finished downloading files')
-
- return
diff --git a/wfdb/version.py b/wfdb/version.py
index 9e86e884..afced147 100644
--- a/wfdb/version.py
+++ b/wfdb/version.py
@@ -1 +1 @@
-__version__ = '1.3.9'
+__version__ = '2.0.0'