From 901de838a508b67d7afd68b3392a60fba9f82269 Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Tue, 7 Aug 2018 16:01:42 +0100
Subject: [PATCH 1/7] upt

---
 Dockerfile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Dockerfile b/Dockerfile
index d35f202..7dc8864 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -134,6 +134,9 @@ RUN export PATH="/opt/miniconda-latest/bin:$PATH" \
     && sync \
     && sed -i '$isource activate neuro' $ND_ENTRYPOINT
 
+RUN conda update conda; conda install --yes --quiet numpy setuptools numpy scipy matplotlib scikit-learn nose h5py PIL pyside; pip install -q nibabel boto https://github.com/mne-tools/mne-python/archive/master.zip
+RUN git clone --depth=1 https://github.com/mne-tools/mne-tools.github.io; mv mne-tools.github.io/dev/_downloads/*.ipynb .; rm -Rf mne-tools.github.io
+
 RUN bash -c 'source activate neuro && jupyter nbextension enable exercise2/main && jupyter nbextension enable spellchecker/main'
 
 USER root

From 6c56b2fa86ebfbc636acc1f177a83e9cbfd94f73 Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Tue, 7 Aug 2018 16:04:27 +0100
Subject: [PATCH 2/7] upt

---
 Dockerfile | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 7dc8864..99ae800 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -134,8 +134,6 @@ RUN export PATH="/opt/miniconda-latest/bin:$PATH" \
     && sync \
     && sed -i '$isource activate neuro' $ND_ENTRYPOINT
 
-RUN conda update conda; conda install --yes --quiet numpy setuptools numpy scipy matplotlib scikit-learn nose h5py PIL pyside; pip install -q nibabel boto https://github.com/mne-tools/mne-python/archive/master.zip
-RUN git clone --depth=1 https://github.com/mne-tools/mne-tools.github.io; mv mne-tools.github.io/dev/_downloads/*.ipynb .; rm -Rf mne-tools.github.io
 
 RUN bash -c 'source activate neuro && jupyter nbextension enable exercise2/main && jupyter nbextension enable spellchecker/main'

From 5740b426d208619b291077c3f99dfc732f2efa2e Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Tue, 7 Aug 2018 16:12:02 +0100
Subject: [PATCH 3/7] upt

---
 Dockerfile | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/Dockerfile b/Dockerfile
index 99ae800..f5d8ea8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -163,6 +163,13 @@ USER neuro
 
 RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py
 
+# Install dependencies and MNE master
+RUN conda update conda; conda install --yes --quiet numpy setuptools numpy scipy matplotlib scikit-learn nose h5py PIL pyside; pip install -q nibabel boto https://github.com/mne-tools/mne-python/archive/master.zip
+
+# Download data
+RUN ipython -c "import mne; print(mne.datasets.sample.data_path(verbose=False))"
+
+
 WORKDIR /home/neuro/nipype_tutorial
 
 CMD ["jupyter-notebook"]

From 56e7c7e5943db78e4488ec92473c9c33d65a89d4 Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Tue, 7 Aug 2018 16:31:09 +0100
Subject: [PATCH 4/7] Update Dockerfile

---
 Dockerfile | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index f5d8ea8..99ae800 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -163,13 +163,6 @@ USER neuro
 
 RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py
 
-# Install dependencies and MNE master
-RUN conda update conda; conda install --yes --quiet numpy setuptools numpy scipy matplotlib scikit-learn nose h5py PIL pyside; pip install -q nibabel boto https://github.com/mne-tools/mne-python/archive/master.zip
-
-# Download data
-RUN ipython -c "import mne; print(mne.datasets.sample.data_path(verbose=False))"
-
-
 WORKDIR /home/neuro/nipype_tutorial
 
 CMD ["jupyter-notebook"]

From 10515f838e1a59f089593378e74f560c019c31c4 Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Mon, 10 Sep 2018 16:02:21 +0100
Subject: [PATCH 5/7] upt

---
 tutorial_preprocessing.ipynb | 1670 ++++++++++++++++++++++++++++++++++
 1 file changed, 1670 insertions(+)
 create mode 100644 tutorial_preprocessing.ipynb

diff --git a/tutorial_preprocessing.ipynb b/tutorial_preprocessing.ipynb
new file mode 100644
index 0000000..f6d6a7e
--- /dev/null
+++ b/tutorial_preprocessing.ipynb
@@ -0,0 +1,1670 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Hands-on 2: How to create an fMRI analysis workflow\n",
+    "\n",
+    "The purpose of this section is that you set up a complete fMRI analysis workflow yourself, so that in the end you are able to perform the analysis from A to Z, i.e. from preprocessing to group analysis. This section will cover the analysis part; the previous section [Hands-on 1: Preprocessing](handson_preprocessing.ipynb) handles the preprocessing part.\n",
+    "\n",
+    "We will use this opportunity to show you some nice additional interfaces/nodes that might not be relevant to your usual analysis. But it's always nice to know that they exist. And hopefully this will encourage you to investigate all the other interfaces that Nipype can bring to your fingertips.\n",
+    "\n",
+    "Important: You will not be able to go through this notebook if you haven't preprocessed your subjects first."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 1st-level Analysis Workflow Structure\n",
+    "\n",
+    "In this notebook we will create a workflow that performs 1st-level analysis and normalizes the resulting beta weights to the MNI template. In concrete steps this means:\n",
+    "\n",
+    " 1. Specify 1st-level model parameters\n",
+    " 2. Specify 1st-level contrasts\n",
+    " 3. Estimate 1st-level contrasts\n",
+    " 4. Normalize 1st-level contrasts"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Imports\n",
+    "\n",
+    "It's always best to have all relevant module imports at the beginning of your script. So let's import what we most certainly need."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nilearn import plotting\n",
+    "%matplotlib inline\n",
+    "\n",
+    "# Get the Node and Workflow object\n",
+    "from nipype import Node, Workflow\n",
+    "\n",
+    "# Specify which SPM to use\n",
+    "from nipype.interfaces.matlab import MatlabCommand\n",
+    "MatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**Note:** Ideally you would also put the imports of all the interfaces that you use here at the top. But as we will develop the workflow step by step, we can also import the relevant modules as we go."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Create Nodes and Workflow connections\n",
+    "\n",
+    "Let's create all the nodes that we need! Make sure to specify all relevant inputs and keep in mind which ones you will later need to connect in your pipeline.\n",
+    "\n",
+    "### Workflow for the 1st-level analysis\n",
+    "\n",
+    "We recommend creating the workflow and establishing all its connections at a later point in your script. This helps to keep everything nicely together. But for this hands-on example it makes sense to establish the connections between the nodes as we go.\n",
+    "\n",
+    "And for this, we first need to create a workflow:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create the workflow here\n",
+    "# Hint: use 'base_dir' to specify where to store the working directory"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st = Workflow(name='work_1st', base_dir='/output/')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Specify 1st-level model parameters (stimuli onsets, duration, etc.)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To specify the 1st-level model we need the subject-specific onset times and durations of the stimuli. Luckily, as we are working with a BIDS dataset, this information is nicely stored in a `tsv` file:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
+    "trialinfo"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Using pandas is probably the quickest and easiest way to aggregate stimulus information per condition."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for group in trialinfo.groupby('trial_type'):\n",
+    "    print(group)\n",
+    "    print(\"\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To create a GLM model, Nipype needs a list of `Bunch` objects per session. As we only have one session, our object needs to look as follows:\n",
+    "\n",
+    "    [Bunch(conditions=['Finger', 'Foot', 'Lips'],\n",
+    "           durations=[[15.0, 15.0, 15.0, 15.0, 15.0],\n",
+    "                      [15.0, 15.0, 15.0, 15.0, 15.0],\n",
+    "                      [15.0, 15.0, 15.0, 15.0, 15.0]],\n",
+    "           onsets=[[10, 100, 190, 280, 370],\n",
+    "                   [40, 130, 220, 310, 400],\n",
+    "                   [70, 160, 250, 340, 430]]\n",
+    "           )]\n",
+    "\n",
+    "For more information see either the [official documentation](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.algorithms.modelgen.html) or the [nipype_tutorial example](https://miykael.github.io/nipype_tutorial/notebooks/example_1stlevel.html#Specify-GLM-Model).\n",
+    "\n",
+    "So, let's create this `Bunch` object that we can then use for the GLM model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "from nipype.interfaces.base import Bunch\n",
+    "\n",
+    "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
+    "conditions = []\n",
+    "onsets = []\n",
+    "durations = []\n",
+    "\n",
+    "for group in trialinfo.groupby('trial_type'):\n",
+    "    conditions.append(group[0])\n",
+    "    onsets.append(list(group[1].onset - 10))  # subtract 10 s to account for the 4 removed dummy scans\n",
+    "    durations.append(group[1].duration.tolist())\n",
+    "\n",
+    "subject_info = [Bunch(conditions=conditions,\n",
+    "                      onsets=onsets,\n",
+    "                      durations=durations,\n",
+    "                      )]\n",
+    "subject_info"
+   ]
+  },
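+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before we move on, a quick sanity check can't hurt. Based on the example `Bunch` shown above, every condition should contain five blocks of 15 s each. A minimal sketch:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sanity check (sketch): we expect 5 blocks of ~15 s per condition\n",
+    "for name, cond_onsets, cond_durations in zip(conditions, onsets, durations):\n",
+    "    print(name, '-', len(cond_onsets), 'blocks, durations:',\n",
+    "          sorted(set(cond_durations)))"
+   ]
+  },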
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Good! Now we can create the node that will create the SPM model. For this we will be using `SpecifySPMModel`. As a reminder, the TR of the acquisition is 2.5 s and we want to use a high-pass filter cutoff of 128 s."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.algorithms.modelgen import SpecifySPMModel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the SpecifySPMModel node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "modelspec = Node(SpecifySPMModel(concatenate_runs=False,\n",
+    "                                 input_units='secs',\n",
+    "                                 output_units='secs',\n",
+    "                                 time_repetition=2.5,\n",
+    "                                 high_pass_filter_cutoff=128,\n",
+    "                                 subject_info=subject_info),\n",
+    "                 name=\"modelspec\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This node will also need some additional inputs, such as the preprocessed functional images, the motion parameters etc. We will specify those once we take care of the workflow data input stream."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Specify 1st-level contrasts\n",
+    "\n",
+    "To do any GLM analysis, we need to also define the contrasts that we want to investigate. If we recap, we had three different conditions in the **fingerfootlips** task in this dataset:\n",
+    "\n",
+    "- **finger**\n",
+    "- **foot**\n",
+    "- **lips**\n",
+    "\n",
+    "Therefore, we could create the following contrasts (seven T-contrasts and two F-contrasts):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Condition names\n",
+    "condition_names = ['Finger', 'Foot', 'Lips']\n",
+    "\n",
+    "# Contrasts\n",
+    "cont01 = ['average',        'T', condition_names, [1/3., 1/3., 1/3.]]\n",
+    "cont02 = ['Finger',         'T', condition_names, [1, 0, 0]]\n",
+    "cont03 = ['Foot',           'T', condition_names, [0, 1, 0]]\n",
+    "cont04 = ['Lips',           'T', condition_names, [0, 0, 1]]\n",
+    "cont05 = ['Finger < others','T', condition_names, [-1, 0.5, 0.5]]\n",
+    "cont06 = ['Foot < others',  'T', condition_names, [0.5, -1, 0.5]]\n",
+    "cont07 = ['Lips > others',  'T', condition_names, [-0.5, -0.5, 1]]\n",
+    "\n",
+    "cont08 = ['activation',  'F', [cont02, cont03, cont04]]\n",
+    "cont09 = ['differences', 'F', [cont05, cont06, cont07]]\n",
+    "\n",
+    "contrast_list = [cont01, cont02, cont03, cont04, cont05, cont06, cont07, cont08, cont09]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Estimate 1st-level contrasts\n",
+    "\n",
+    "Before we can estimate the 1st-level contrasts, we first need to create the 1st-level design. Here you can also specify what kind of basis function you want (HRF, FIR, Fourier, etc.), if you want to use time and dispersion derivatives and how you want to model the serial correlation.\n",
+    "\n",
+    "In this example I propose that we use an HRF basis function, model time derivatives, and model the serial correlation with AR(1)."
+   ]
+  },
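+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a side note, the basis function is selected via the `bases` argument of the `Level1Design` interface introduced just below. Purely as a sketch (not used in this workflow), this is how the dictionaries for an FIR basis or an HRF with both derivatives would look:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only, not used below: an FIR basis covering the 15 s blocks\n",
+    "# ('length' is the window in seconds, 'order' the number of time bins)\n",
+    "fir_bases = {'fir': {'length': 15, 'order': 5}}\n",
+    "\n",
+    "# ... and a canonical HRF with time *and* dispersion derivatives\n",
+    "hrf_bases = {'hrf': {'derivs': [1, 1]}}"
+   ]
+  },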
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import Level1Design"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the Level1Design node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "level1design = Node(Level1Design(bases={'hrf': {'derivs': [1, 0]}},\n",
+    "                                 timing_units='secs',\n",
+    "                                 interscan_interval=2.5,\n",
+    "                                 model_serial_correlations='AR(1)'),\n",
+    "                    name=\"level1design\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have the Model Specification and 1st-Level Design node, we can connect them to each other:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect the two nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(modelspec, level1design, [('session_info',\n",
+    "                                                 'session_info')])])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we need to estimate the model. I recommend that you use the `Classical: 1` method to estimate the model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import EstimateModel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the EstimateModel node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n",
+    "                      name=\"level1estimate\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can connect the 1st-Level Design node with the model estimation node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect the two nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(level1design, level1estimate, [('spm_mat_file',\n",
+    "                                                      'spm_mat_file')])])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have estimated the model, we can estimate the contrasts. Don't forget to feed the list of contrasts we specified above to this node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import EstimateContrast"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the EstimateContrast node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "level1conest = Node(EstimateContrast(contrasts=contrast_list),\n",
+    "                    name=\"level1conest\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can connect the model estimation node with the contrast estimation node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect the two nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(level1estimate, level1conest, [('spm_mat_file',\n",
+    "                                                      'spm_mat_file'),\n",
+    "                                                     ('beta_images',\n",
+    "                                                      'beta_images'),\n",
+    "                                                     ('residual_image',\n",
+    "                                                      'residual_image')])])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Normalize 1st-level contrasts\n",
+    "\n",
+    "Now that the contrasts were estimated in subject space, we can put them into a common reference space by normalizing them to a specific template. In this case we will be using SPM12's Normalize routine and normalize to the SPM12 tissue probability map `TPM.nii`.\n",
+    "\n",
+    "At this step you can also specify the voxel resolution of the output volumes. If you don't specify it, it will normalize to a voxel resolution of 2x2x2 mm. As a training exercise, set the voxel resolution to 4x4x4 mm."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import Normalize12\n",
+    "\n",
+    "# Location of the template\n",
+    "template = '/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the Normalize12 node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "normalize = Node(Normalize12(jobtype='estwrite',\n",
+    "                             tpm=template,\n",
+    "                             write_voxel_sizes=[4, 4, 4]\n",
+    "                             ),\n",
+    "                 name=\"normalize\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can connect the estimated contrasts to the normalization node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect the nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(level1conest, normalize, [('con_images',\n",
+    "                                                 'apply_to_files')])\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data input with `SelectFiles` and `iterables`\n",
+    "\n",
+    "As in the preprocessing hands-on, we will again be using [`SelectFiles`](../../../nipype_tutorial/notebooks/basic_data_input.ipynb#SelectFiles) and [`iterables`](../../../nipype_tutorial/notebooks/basic_iteration.ipynb). So, what do we need?\n",
+    "\n",
+    "From the preprocessing pipeline, we need the functional images, the motion parameters and the list of outliers. Also, for the normalization we need the subject-specific anatomy."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import the SelectFiles\n",
+    "from nipype import SelectFiles\n",
+    "\n",
+    "# String template with {}-based strings\n",
+    "templates = {'anat': '/data/ds000114/sub-{subj_id}/ses-test/anat/sub-{subj_id}_ses-test_T1w.nii.gz',\n",
+    "             'func': '/output/datasink_handson/preproc/sub-{subj_id}_detrend.nii.gz',\n",
+    "             'mc_param': '/output/datasink_handson/preproc/sub-{subj_id}.par',\n",
+    "             'outliers': '/output/datasink_handson/preproc/art.sub-{subj_id}_outliers.txt'\n",
+    "             }\n",
+    "\n",
+    "# Create SelectFiles node\n",
+    "sf = Node(SelectFiles(templates, sort_filelist=True),\n",
+    "          name='selectfiles')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can specify over which subjects the workflow should iterate. As we preprocessed only six of the subjects, we can use only those for this analysis."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# list of subject identifiers\n",
+    "subject_list = ['02', '03', '04', '07', '08', '09']\n",
+    "sf.iterables = [('subj_id', subject_list)]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Gunzip Node"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "SPM12 can accept NIfTI files as input, but only if they are not compressed ('unzipped'). Therefore, we need to use a `Gunzip` node to unzip the detrended functional file and another one to unzip the anatomy image, before we can feed them to the model specification node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.algorithms.misc import Gunzip"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the two Gunzip nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "gunzip_anat = Node(Gunzip(), name='gunzip_anat')\n",
+    "gunzip_func = Node(Gunzip(), name='gunzip_func')"
+   ]
+  },
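+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By the way, if you want to verify that the `SelectFiles` templates above actually resolve to files on disk, the interface can also be run on its own, outside of any workflow. A minimal sketch, assuming subject `02` was preprocessed:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Standalone check of the templates (sketch)\n",
+    "sf_test = SelectFiles(templates, sort_filelist=True)\n",
+    "sf_test.inputs.subj_id = '02'\n",
+    "print(sf_test.run().outputs)"
+   ]
+  },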
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And as a final step, we just need to connect this `SelectFiles` node to the rest of the workflow."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect SelectFiles node to the other nodes here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(sf, gunzip_anat, [('anat', 'in_file')]),\n",
+    "                     (sf, gunzip_func, [('func', 'in_file')]),\n",
+    "                     (gunzip_anat, normalize, [('out_file', 'image_to_align')]),\n",
+    "                     (gunzip_func, modelspec, [('out_file', 'functional_runs')]),\n",
+    "                     (sf, modelspec, [('mc_param', 'realignment_parameters'),\n",
+    "                                      ('outliers', 'outlier_files'),\n",
+    "                                      ])\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data output with `DataSink`\n",
+    "\n",
+    "Now, before we run the workflow, let's again specify a `DataSink` folder to keep only the files that we want."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.io import DataSink"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate DataSink node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the datasink node\n",
+    "output_folder = 'datasink_handson'\n",
+    "datasink = Node(DataSink(base_directory='/output/',\n",
+    "                         container=output_folder),\n",
+    "                name=\"datasink\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Use the following substitutions for the DataSink output\n",
+    "substitutions = [('_subj_id_', 'sub-')]\n",
+    "datasink.inputs.substitutions = substitutions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now the next step is to specify all the output that we want to keep in our output folder `output`. The most important outputs to keep are probably:\n",
+    "- the SPM.mat file and the spmT and spmF files from the contrast estimation node\n",
+    "- the normalized betas and anatomy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect nodes to datasink here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.connect([(level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),\n",
+    "                                               ('spmT_images', '1stLevel.@T'),\n",
+    "                                               ('spmF_images', '1stLevel.@F'),\n",
+    "                                               ]),\n",
+    "                     (normalize, datasink, [('normalized_files', 'normalized.@files'),\n",
+    "                                            ('normalized_image', 'normalized.@image'),\n",
+    "                                            ]),\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualize the workflow\n",
+    "\n",
+    "Now that the workflow is finished, let's visualize it again."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create 1st-level analysis output graph\n",
+    "analysis1st.write_graph(graph2use='colored', format='png', simple_form=True)\n",
+    "\n",
+    "# Visualize the graph\n",
+    "from IPython.display import Image\n",
+    "Image(filename='/output/work_1st/graph.png')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run the Workflow\n",
+    "\n",
+    "Now that everything is ready, we can run the 1st-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "analysis1st.run('MultiProc', plugin_args={'n_procs': 4})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualize results"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "import numpy as np\n",
+    "from matplotlib import pyplot as plt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, let's look at the 1st-level design matrix of subject 7, to verify that everything is as it should be."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from scipy.io import loadmat\n",
+    "\n",
+    "# Using scipy's loadmat function we can access SPM.mat\n",
+    "spmmat = loadmat('/output/datasink_handson/1stLevel/sub-07/SPM.mat',\n",
+    "                 struct_as_record=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The design matrix and the names of the regressors are a bit hidden in the `spmmat` variable, but they can be accessed as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "designMatrix = spmmat['SPM'][0][0].xX[0][0].X\n",
+    "names = [i[0] for i in spmmat['SPM'][0][0].xX[0][0].name[0]]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now before we can plot it, we just need to normalize the design matrix in such a way that each column has a maximum amplitude of 1. This is just for visualization purposes; otherwise the rotation parameters with their rather small values would not show up in the figure."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "normed_design = designMatrix / np.abs(designMatrix).max(axis=0)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And we're ready to plot the design matrix."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fig, ax = plt.subplots(figsize=(8, 8))\n",
+    "plt.imshow(normed_design, aspect='auto', cmap='gray', interpolation='none')\n",
+    "ax.set_ylabel('Volume id')\n",
+    "ax.set_xticks(np.arange(len(names)))\n",
+    "ax.set_xticklabels(names, rotation=90);"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we're happy with the design matrix, let's look at how well the normalization worked."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import nibabel as nb\n",
+    "from nilearn.plotting import plot_anat\n",
+    "from nilearn.plotting import plot_glass_brain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load GM probability map of TPM.nii\n",
+    "img = nb.load('/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii')\n",
+    "GM_template = nb.Nifti1Image(img.get_data()[..., 0], img.affine, img.header)\n",
+    "\n",
+    "# Plot normalized subject anatomy\n",
+    "display = plot_anat('/output/datasink_handson/normalized/sub-07/wsub-07_ses-test_T1w.nii',\n",
+    "                    dim=-0.1)\n",
+    "\n",
+    "# Overlay the edges of the GM map\n",
+    "display.add_edges(GM_template)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's look at the contrasts of one subject that we've just computed, in particular the two F-contrasts."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wess_0008.nii',\n",
+    "                 colorbar=True, display_mode='lyrz', black_bg=True, threshold=25,\n",
+    "                 title='subject 7 - F-contrast: Activation');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wess_0009.nii',\n",
+    "                 colorbar=True, display_mode='lyrz', black_bg=True, threshold=25,\n",
+    "                 title='subject 7 - F-contrast: Differences');"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 2nd-level Analysis Workflow Structure\n",
+    "\n",
+    "Last but not least, the group-level analysis. This example will also directly include thresholding of the output, as well as some visualization."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Imports\n",
+    "\n",
+    "To make sure that the necessary imports are done, here they are again:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Get the Node and Workflow object\n",
+    "from nipype import Node, Workflow\n",
+    "\n",
+    "# Specify which SPM to use\n",
+    "from nipype.interfaces.matlab import MatlabCommand\n",
+    "MatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Create Nodes and Workflow connections\n",
+    "\n",
+    "Now we should know this part very well.\n",
+    "\n",
+    "### Workflow for the 2nd-level analysis"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Create the workflow here\n",
+    "# Hint: use 'base_dir' to specify where to store the working directory"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis2nd = Workflow(name='work_2nd', base_dir='/output/')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2nd-Level Design\n",
+    "\n",
+    "This step depends on your study design and the tests you want to perform. If you're using SPM to do the group analysis, you have the liberty to choose between a factorial design, a multiple regression design, a one-sample T-test design, a paired T-test design, or a two-sample T-test design.\n",
+    "\n",
+    "For the current example, we will be using a one-sample T-test design (the other designs are sketched just below)."
+   ]
+  },
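+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "All of these designs are available as interfaces in `nipype.interfaces.spm`. As a sketch (the interface names below are listed as I would expect them in recent Nipype versions; only `OneSampleTTestDesign` is actually used in this notebook):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch: 2nd-level design interfaces wrapped by Nipype's SPM module\n",
+    "# (only the one-sample T-test design is used in this notebook)\n",
+    "from nipype.interfaces.spm import (OneSampleTTestDesign,\n",
+    "                                   TwoSampleTTestDesign,\n",
+    "                                   PairedTTestDesign,\n",
+    "                                   MultipleRegressionDesign)"
+   ]
+  },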
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import OneSampleTTestDesign"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the OneSampleTTestDesign node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "onesamplettestdes = Node(OneSampleTTestDesign(), name=\"onesampttestdes\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The next two steps are the same as for the 1st-level design, i.e. estimation of the model followed by estimation of the contrasts."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import EstimateModel, EstimateContrast"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the EstimateModel and the EstimateContrast node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n",
+    "                      name=\"level2estimate\")\n",
+    "\n",
+    "level2conestimate = Node(EstimateContrast(group_contrast=True),\n",
+    "                         name=\"level2conestimate\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To finish the `EstimateContrast` node, we also need to specify which contrast should be computed. For a 2nd-level one-sample T-test design, this is rather straightforward:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cont01 = ['Group', 'T', ['mean'], [1]]\n",
+    "level2conestimate.inputs.contrasts = [cont01]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, let's connect those three design nodes to each other."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect OneSampleTTestDesign, EstimateModel and EstimateContrast here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis2nd.connect([(onesamplettestdes, level2estimate, [('spm_mat_file',\n",
+    "                                                           'spm_mat_file')]),\n",
+    "                     (level2estimate, level2conestimate, [('spm_mat_file',\n",
+    "                                                           'spm_mat_file'),\n",
+    "                                                          ('beta_images',\n",
+    "                                                           'beta_images'),\n",
+    "                                                          ('residual_image',\n",
+    "                                                           'residual_image')])\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Thresholding of output contrast\n",
+    "\n",
+    "And to close, we will use SPM `Threshold`. With this routine, we can set a specific voxel threshold (e.g. *p*<0.001) and apply an FDR cluster threshold (e.g. *p*<0.05).\n",
+    "\n",
+    "As we only have 6 subjects, I recommend setting the voxel threshold to 0.01 and leaving the cluster threshold at 0.05."
+   ]
+  },
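+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For reference, the same routine also supports voxel-wise FWE correction instead of the topological FDR approach used here. A sketch of such a node (not used below; parameter names as in the `Threshold` interface we are about to set up):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only: voxel-wise FWE correction at p<0.05 instead of the\n",
+    "# liberal uncorrected voxel threshold used below\n",
+    "from nipype.interfaces.spm import Threshold\n",
+    "level2thresh_fwe = Node(Threshold(contrast_index=1,\n",
+    "                                  use_fwe_correction=True,\n",
+    "                                  height_threshold=0.05,\n",
+    "                                  height_threshold_type='p-value'),\n",
+    "                        name=\"level2thresh_fwe\")"
+   ]
+  },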
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.spm import Threshold"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "level2thresh = Node(Threshold(contrast_index=1,\n",
+    "                              use_topo_fdr=True,\n",
+    "                              use_fwe_correction=False,\n",
+    "                              extent_threshold=0,\n",
+    "                              height_threshold=0.01,\n",
+    "                              height_threshold_type='p-value',\n",
+    "                              extent_fdr_p_threshold=0.05),\n",
+    "                    name=\"level2thresh\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect the Threshold node to the EstimateContrast node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis2nd.connect([(level2conestimate, level2thresh, [('spm_mat_file',\n",
+    "                                                         'spm_mat_file'),\n",
+    "                                                        ('spmT_images',\n",
+    "                                                         'stat_image'),\n",
+    "                                                        ])\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Gray Matter Mask\n",
+    "\n",
+    "We could run our 2nd-level workflow as it is. All the major nodes are there. But I nonetheless suggest that we use a gray matter mask to restrict the analysis to only gray matter voxels.\n",
+    "\n",
+    "In the 1st-level analysis, we normalized to SPM12's `TPM.nii` tissue probability atlas. Therefore, we could just take the gray matter probability map of this `TPM.nii` image (the first volume) and threshold it at a certain probability value to get a binary mask. This can of course also all be done in Nipype (see the sketch at the end of this subsection), but sometimes the direct bash code is quicker:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%bash\n",
+    "TEMPLATE='/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'\n",
+    "\n",
+    "# Extract the first volume with `fslroi`\n",
+    "fslroi $TEMPLATE GM_PM.nii.gz 0 1\n",
+    "\n",
+    "# Threshold the probability mask at 10%\n",
+    "fslmaths GM_PM.nii.gz -thr 0.10 -bin /output/datasink_handson/GM_mask.nii.gz\n",
+    "\n",
+    "# Unzip the mask and delete the GM_PM.nii.gz file\n",
+    "gunzip /output/datasink_handson/GM_mask.nii.gz\n",
+    "rm GM_PM.nii.gz"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's take a look at this mask:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import nibabel as nb\n",
+    "mask = nb.load('/output/datasink_handson/GM_mask.nii')\n",
+    "mask.orthoview()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we just need to specify this binary mask as an `explicit_mask_file` for the one-sample T-test node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "onesamplettestdes.inputs.explicit_mask_file = '/output/datasink_handson/GM_mask.nii'"
+   ]
+  },
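+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And, as promised above, here is a sketch of how the same mask could be created within Nipype using its FSL interfaces instead of raw bash. The parameters mirror the two shell commands above; this cell is meant as an illustration and is not run as part of the workflow:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch only: the same GM mask via Nipype's FSL interfaces\n",
+    "from nipype.interfaces.fsl import ExtractROI, ImageMaths\n",
+    "\n",
+    "# Extract the first volume (the GM probability map), like `fslroi`\n",
+    "extract_gm = ExtractROI(in_file='/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii',\n",
+    "                        t_min=0, t_size=1, roi_file='GM_PM.nii.gz')\n",
+    "\n",
+    "# Threshold at 10% and binarize, like `fslmaths`\n",
+    "binarize_gm = ImageMaths(in_file='GM_PM.nii.gz', op_string='-thr 0.10 -bin',\n",
+    "                         out_file='/output/datasink_handson/GM_mask.nii',\n",
+    "                         output_type='NIFTI')\n",
+    "\n",
+    "# extract_gm.run(); binarize_gm.run()"
+   ]
+  },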
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data input with `SelectFiles` and `iterables`\n",
+    "\n",
+    "We will again be using [`SelectFiles`](../../../nipype_tutorial/notebooks/basic_data_input.ipynb#SelectFiles) and [`iterables`](../../../nipype_tutorial/notebooks/basic_iteration.ipynb).\n",
+    "\n",
+    "So, what do we need? Actually, just the 1st-level contrasts of all subjects, separated by contrast number."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import the SelectFiles\n",
+    "from nipype import SelectFiles\n",
+    "\n",
+    "# String template with {}-based strings\n",
+    "templates = {'cons': '/output/datasink_handson/normalized/sub-*/w*_{cont_id}.nii'}\n",
+    "\n",
+    "# Create SelectFiles node\n",
+    "sf = Node(SelectFiles(templates, sort_filelist=True),\n",
+    "          name='selectfiles')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We are using `*` to tell `SelectFiles` that it can grab all available subjects and any contrast with a specific contrast id, independent of whether it's a T-contrast (`con`) or an F-contrast (`ess`).\n",
+    "\n",
+    "So, let's specify over which contrasts the workflow should iterate."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# list of contrast identifiers\n",
+    "contrast_id_list = ['0001', '0002', '0003', '0004', '0005',\n",
+    "                    '0006', '0007', '0008', '0009']\n",
+    "sf.iterables = [('cont_id', contrast_id_list)]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we need to connect the `SelectFiles` to the `OneSampleTTestDesign` node."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "analysis2nd.connect([(sf, onesamplettestdes, [('cons', 'in_files')])])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Data output with `DataSink`\n",
+    "\n",
+    "Now, before we run the workflow, let's again specify a `DataSink` folder to keep only the files that we want."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nipype.interfaces.io import DataSink"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate DataSink node here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "# Initiate the datasink node\n",
+    "output_folder = 'datasink_handson'\n",
+    "datasink = Node(DataSink(base_directory='/output/',\n",
+    "                         container=output_folder),\n",
+    "                name=\"datasink\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Use the following substitutions for the DataSink output\n",
+    "substitutions = [('_cont_id_', 'con_')]\n",
+    "datasink.inputs.substitutions = substitutions"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now the next step is to specify all the output that we want to keep in our output folder `output`. The most important outputs to keep are probably:\n",
+    "- the SPM.mat file and the spmT images from the `EstimateContrast` node\n",
+    "- the thresholded spmT images from the `Threshold` node"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden",
+    "solution2_first": true
+   },
+   "outputs": [],
+   "source": [
+    "# Connect nodes to datasink here"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "solution2": "hidden"
+   },
+   "outputs": [],
+   "source": [
+    "analysis2nd.connect([(level2conestimate, datasink, [('spm_mat_file',\n",
+    "                                                     '2ndLevel.@spm_mat'),\n",
+    "                                                    ('spmT_images',\n",
+    "                                                     '2ndLevel.@T'),\n",
+    "                                                    ('con_images',\n",
+    "                                                     '2ndLevel.@con')]),\n",
+    "                     (level2thresh, datasink, [('thresholded_map',\n",
+    "                                                '2ndLevel.@threshold')])\n",
+    "                     ])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Visualize the workflow\n",
+    "\n",
+    "And we're good to go. Let's first take a look at the workflow."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create 2nd-level analysis output graph\n",
+    "analysis2nd.write_graph(graph2use='colored', format='png', simple_form=True)\n",
+    "\n",
+    "# Visualize the graph\n",
+    "from IPython.display import Image\n",
+    "Image(filename='/output/work_2nd/graph.png')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run the Workflow\n",
+    "\n",
+    "Now that everything is ready, we can run the 2nd-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "analysis2nd.run('MultiProc', plugin_args={'n_procs': 4})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Visualize results\n",
+    "\n",
+    "Let's take a look at the results. Keep in mind that we only have *`N=6`* subjects and that we set the voxel threshold to a very liberal `p<0.01`. The results should therefore be interpreted with a lot of caution."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nilearn.plotting import plot_glass_brain\n",
+    "%matplotlib inline\n",
+    "out_path = '/output/datasink_handson/2ndLevel/'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0001/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='average (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0002/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Finger (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0003/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Foot (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0004/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Lips (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0005/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Finger < others (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0006/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Foot < others (FDR corrected)');"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plot_glass_brain(out_path + 'con_0007/spmT_0001_thr.nii', display_mode='lyrz',\n",
+    "                 black_bg=True, colorbar=True, title='Lips > others (FDR corrected)');"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.5"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

From 93d62d3ce929f0cb25eb21c20ffc62f510a8a0bb Mon Sep 17 00:00:00 2001
From: ccbrain
Date: Mon, 10 Sep 2018 16:39:48 +0100
Subject: [PATCH 6/7] upt

---
 tutorial_preprocessing.ipynb | 1670 ----------------------------------
 1 file changed, 1670 deletions(-)
 delete mode 100644 tutorial_preprocessing.ipynb

diff --git a/tutorial_preprocessing.ipynb b/tutorial_preprocessing.ipynb
deleted file mode 100644
index f6d6a7e..0000000
--- a/tutorial_preprocessing.ipynb
+++ /dev/null
@@ -1,1670 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Hands-on 2: How to create an fMRI analysis workflow\n",
-    "\n",
-    "The purpose of this section is that you set up a complete fMRI analysis workflow yourself, so that in the end you are able to perform the analysis from A to Z, i.e. from preprocessing to group analysis. This section will cover the analysis part; the previous section [Hands-on 1: Preprocessing](handson_preprocessing.ipynb) handles the preprocessing part.\n",
-    "\n",
-    "We will use this opportunity to show you some nice additional interfaces/nodes that might not be relevant to your usual analysis. But it's always nice to know that they exist. And hopefully this will encourage you to investigate all the other interfaces that Nipype can bring to your fingertips.\n",
-    "\n",
-    "Important: You will not be able to go through this notebook if you haven't preprocessed your subjects first."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# 1st-level Analysis Workflow Structure\n",
-    "\n",
-    "In this notebook we will create a workflow that performs 1st-level analysis and normalizes the resulting beta weights to the MNI template. In concrete steps this means:\n",
-    "\n",
-    " 1. Specify 1st-level model parameters\n",
-    " 2. Specify 1st-level contrasts\n",
-    " 3. Estimate 1st-level contrasts\n",
-    " 4. Normalize 1st-level contrasts"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Imports\n",
-    "\n",
-    "It's always best to have all relevant module imports at the beginning of your script. So let's import what we most certainly need."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from nilearn import plotting\n",
-    "%matplotlib inline\n",
-    "\n",
-    "# Get the Node and Workflow object\n",
-    "from nipype import Node, Workflow\n",
-    "\n",
-    "# Specify which SPM to use\n",
-    "from nipype.interfaces.matlab import MatlabCommand\n",
-    "MatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "**Note:** Ideally you would also put the imports of all the interfaces that you use here at the top. But as we will develop the workflow step by step, we can also import the relevant modules as we go."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Create Nodes and Workflow connections\n",
-    "\n",
-    "Let's create all the nodes that we need! Make sure to specify all relevant inputs and keep in mind which ones you will later need to connect in your pipeline.\n",
-    "\n",
-    "### Workflow for the 1st-level analysis\n",
-    "\n",
-    "We recommend creating the workflow and establishing all its connections at a later point in your script. This helps to keep everything nicely together. But for this hands-on example it makes sense to establish the connections between the nodes as we go.\n",
-    "\n",
-    "And for this, we first need to create a workflow:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "solution2": "hidden",
-    "solution2_first": true
-   },
-   "outputs": [],
-   "source": [
-    "# Create the workflow here\n",
-    "# Hint: use 'base_dir' to specify where to store the working directory"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "solution2": "hidden"
-   },
-   "outputs": [],
-   "source": [
-    "analysis1st = Workflow(name='work_1st', base_dir='/output/')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Specify 1st-level model parameters (stimuli onsets, duration, etc.)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To specify the 1st-level model we need the subject-specific onset times and durations of the stimuli. Luckily, as we are working with a BIDS dataset, this information is nicely stored in a `tsv` file:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import pandas as pd\n",
-    "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
-    "trialinfo"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Using pandas is probably the quickest and easiest way to aggregate stimulus information per condition."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for group in trialinfo.groupby('trial_type'):\n",
-    "    print(group)\n",
-    "    print(\"\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To create a GLM model, Nipype needs a list of `Bunch` objects per session. As we only have one session, our object needs to look as follows:\n",
-    "\n",
-    "    [Bunch(conditions=['Finger', 'Foot', 'Lips'],\n",
-    "           durations=[[15.0, 15.0, 15.0, 15.0, 15.0],\n",
-    "                      [15.0, 15.0, 15.0, 15.0, 15.0],\n",
-    "                      [15.0, 15.0, 15.0, 15.0, 15.0]],\n",
-    "           onsets=[[10, 100, 190, 280, 370],\n",
-    "                   [40, 130, 220, 310, 400],\n",
-    "                   [70, 160, 250, 340, 430]]\n",
-    "           )]\n",
-    "\n",
-    "For more information see either the [official documentation](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.algorithms.modelgen.html) or the [nipype_tutorial example](https://miykael.github.io/nipype_tutorial/notebooks/example_1stlevel.html#Specify-GLM-Model).\n",
-    "\n",
-    "So, let's create this `Bunch` object that we can then use for the GLM model."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import pandas as pd\n",
-    "from nipype.interfaces.base import Bunch\n",
-    "\n",
-    "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
-    "conditions = []\n",
-    "onsets = []\n",
-    "durations = []\n",
-    "\n",
-    "for group in trialinfo.groupby('trial_type'):\n",
-    "    conditions.append(group[0])\n",
-    "    onsets.append(list(group[1].onset - 10))  # subtract 10 s to account for the 4 removed dummy scans\n",
-    "    durations.append(group[1].duration.tolist())\n",
-    "\n",
-    "subject_info = [Bunch(conditions=conditions,\n",
-    "                      onsets=onsets,\n",
-    "                      durations=durations,\n",
-    "                      )]\n",
-    "subject_info"
-   ]
-  },
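-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Before we move on, a quick sanity check can't hurt. Based on the example `Bunch` shown above, every condition should contain five blocks of 15 s each. A minimal sketch:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Sanity check (sketch): we expect 5 blocks of ~15 s per condition\n",
-    "for name, cond_onsets, cond_durations in zip(conditions, onsets, durations):\n",
-    "    print(name, '-', len(cond_onsets), 'blocks, durations:',\n",
-    "          sorted(set(cond_durations)))"
-   ]
-  },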
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Good! Now we can create the node that will create the SPM model. For this we will be using `SpecifySPMModel`. As a reminder, the TR of the acquisition is 2.5 s and we want to use a high-pass filter cutoff of 128 s."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from nipype.algorithms.modelgen import SpecifySPMModel"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "solution2": "hidden",
-    "solution2_first": true
-   },
-   "outputs": [],
-   "source": [
-    "# Initiate the SpecifySPMModel node here"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "solution2": "hidden"
-   },
-   "outputs": [],
-   "source": [
-    "modelspec = Node(SpecifySPMModel(concatenate_runs=False,\n",
-    "                                 input_units='secs',\n",
-    "                                 output_units='secs',\n",
-    "                                 time_repetition=2.5,\n",
-    "                                 high_pass_filter_cutoff=128,\n",
-    "                                 subject_info=subject_info),\n",
-    "                 name=\"modelspec\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This node will also need some additional inputs, such as the preprocessed functional images, the motion parameters etc. We will specify those once we take care of the workflow data input stream."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Specify 1st-level contrasts\n",
-    "\n",
-    "To do any GLM analysis, we need to also define the contrasts that we want to investigate. If we recap, we had three different conditions in the **fingerfootlips** task in this dataset:\n",
-    "\n",
-    "- **finger**\n",
-    "- **foot**\n",
-    "- **lips**\n",
-    "\n",
-    "Therefore, we could create the following contrasts (seven T-contrasts and two F-contrasts):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Condition names\n",
-    "condition_names = ['Finger', 'Foot', 'Lips']\n",
-    "\n",
-    "# Contrasts\n",
-    "cont01 = ['average',        'T', condition_names, [1/3., 1/3., 1/3.]]\n",
-    "cont02 = ['Finger',         'T', condition_names, [1, 0, 0]]\n",
-    "cont03 = ['Foot',           'T', condition_names, [0, 1, 0]]\n",
-    "cont04 = ['Lips',           'T', condition_names, [0, 0, 1]]\n",
-    "cont05 = ['Finger < others','T', condition_names, [-1, 0.5, 0.5]]\n",
-    "cont06 = ['Foot < others',  'T', condition_names, [0.5, -1, 0.5]]\n",
-    "cont07 = ['Lips > others',  'T', condition_names, [-0.5, -0.5, 1]]\n",
-    "\n",
-    "cont08 = ['activation',  'F', [cont02, cont03, cont04]]\n",
-    "cont09 = ['differences', 'F', [cont05, cont06, cont07]]\n",
-    "\n",
-    "contrast_list = [cont01, cont02, cont03, cont04, cont05, cont06, cont07, cont08, cont09]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Estimate 1st-level contrasts\n",
-    "\n",
-    "Before we can estimate the 1st-level contrasts, we first need to create the 1st-level design. Here you can also specify what kind of basis function you want (HRF, FIR, Fourier, etc.), if you want to use time and dispersion derivatives and how you want to model the serial correlation.\n",
-    "\n",
-    "In this example I propose that we use an HRF basis function, model time derivatives, and model the serial correlation with AR(1)."
-   ]
-  },
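-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As a side note, the basis function is selected via the `bases` argument of the `Level1Design` interface introduced just below. Purely as a sketch (not used in this workflow), this is how the dictionaries for an FIR basis or an HRF with both derivatives would look:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Sketch only, not used below: an FIR basis covering the 15 s blocks\n",
-    "# ('length' is the window in seconds, 'order' the number of time bins)\n",
-    "fir_bases = {'fir': {'length': 15, 'order': 5}}\n",
-    "\n",
-    "# ... and a canonical HRF with time *and* dispersion derivatives\n",
-    "hrf_bases = {'hrf': {'derivs': [1, 1]}}"
-   ]
-  },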
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.spm import Level1Design"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate the Level1Design node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "level1design = Node(Level1Design(bases={'hrf': {'derivs': [1, 0]}},\n",
- " timing_units='secs',\n",
- " interscan_interval=2.5,\n",
- " model_serial_correlations='AR(1)'),\n",
- " name=\"level1design\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now that we have the Model Specification and 1st-Level Design node, we can connect them to each other:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Connect the two nodes here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "analysis1st.connect([(modelspec, level1design, [('session_info',\n",
- " 'session_info')])])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we need to estimate the model. I recommend that you use the `Classical: 1` method to estimate the model."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.spm import EstimateModel"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate the EstimateModel node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n",
- " name=\"level1estimate\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we can connect the 1st-Level Design node with the model estimation node."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Connect the two nodes here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "analysis1st.connect([(level1design, level1estimate, [('spm_mat_file',\n",
- " 'spm_mat_file')])])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now that we have estimated the model, we can estimate the contrasts. Don't forget to feed the list of contrasts we specified above to this node.",
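- "\n",
- "A quick optional check (a minimal sketch, reusing the `contrast_list` defined above) before wiring it in:\n",
- "\n",
- "    assert len(contrast_list) == 9\n",
- "    print([c[0] for c in contrast_list])  # the nine contrast names\n",
- "\n",
- "Printing the names once more makes it easier to match the resulting `spmT_000?` and `ess_000?` files to their contrasts later on."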
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nipype.interfaces.spm import EstimateContrast" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Initiate the EstimateContrast node here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "level1conest = Node(EstimateContrast(contrasts=contrast_list),\n", - " name=\"level1conest\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can connect the model estimation node with the contrast estimation node." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Connect the two nodes here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "analysis1st.connect([(level1estimate, level1conest, [('spm_mat_file',\n", - " 'spm_mat_file'),\n", - " ('beta_images',\n", - " 'beta_images'),\n", - " ('residual_image',\n", - " 'residual_image')])])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Normalize 1st-level contrasts\n", - "\n", - "Now that the contrasts were estimated in subject space we can put them into a common reference space by normalizing them to a specific template. In this case we will be using SPM12's Normalize routine and normalize to the SPM12 tissue probability map `TPM.nii`.\n", - "\n", - "At this step you can also specify the voxel resolution of the output volumes. If you don't specify it, it will normalize to a voxel resolution of 2x2x2mm. As a training exercise, set the voxel resolution to 4x4x4mm." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nipype.interfaces.spm import Normalize12\n", - "\n", - "# Location of the template\n", - "template = '/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Initiate the Normalize12 node here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "normalize = Node(Normalize12(jobtype='estwrite',\n", - " tpm=template,\n", - " write_voxel_sizes=[4, 4, 4]\n", - " ),\n", - " name=\"normalize\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can connect the estimated contrasts to normalization node." 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Connect the nodes here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "analysis1st.connect([(level1conest, normalize, [('con_images',\n",
- " 'apply_to_files')])\n",
- " ])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Datainput with `SelectFiles` and `iterables` \n",
- "\n",
- "As in the preprocessing hands-on, we will again be using [`SelectFiles`](../../../nipype_tutorial/notebooks/basic_data_input.ipynb#SelectFiles) and [`iterables`](../../../nipype_tutorial/notebooks/basic_iteration.ipynb). So, what do we need?\n",
- "\n",
- "From the preprocessing pipeline, we need the functional images, the motion parameters and the list of outliers. Also, for the normalization we need the subject-specific anatomy."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Import the SelectFiles\n",
- "from nipype import SelectFiles\n",
- "\n",
- "# String template with {}-based strings\n",
- "templates = {'anat': '/data/ds000114/sub-{subj_id}/ses-test/anat/sub-{subj_id}_ses-test_T1w.nii.gz',\n",
- " 'func': '/output/datasink_handson/preproc/sub-{subj_id}_detrend.nii.gz',\n",
- " 'mc_param': '/output/datasink_handson/preproc/sub-{subj_id}.par',\n",
- " 'outliers': '/output/datasink_handson/preproc/art.sub-{subj_id}_outliers.txt'\n",
- " }\n",
- "\n",
- "# Create SelectFiles node\n",
- "sf = Node(SelectFiles(templates, sort_filelist=True),\n",
- " name='selectfiles')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we can specify over which subjects the workflow should iterate. As we preprocessed only a subset of the subjects, we can use only those for this analysis."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# list of subject identifiers\n",
- "subject_list = ['02', '03', '04', '07', '08', '09']\n",
- "sf.iterables = [('subj_id', subject_list)]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Gunzip Node"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "SPM12 can accept NIfTI files as input, but only if they are not compressed ('unzipped'). Therefore, we need to use a `Gunzip` node to unzip the detrend file and another one to unzip the anatomy image, before we can feed them to the model specification and normalization nodes."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.algorithms.misc import Gunzip"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate the two Gunzip nodes here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "gunzip_anat = Node(Gunzip(), name='gunzip_anat')\n",
- "gunzip_func = Node(Gunzip(), name='gunzip_func')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "And as a final step, we just need to connect this `SelectFiles` node to the rest of the workflow.",
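- "\n",
- "If you first want to verify that the templates resolve to existing files (a minimal sketch using plain globbing, outside of the workflow):\n",
- "\n",
- "    from glob import glob\n",
- "    print(glob('/output/datasink_handson/preproc/sub-*_detrend.nii.gz'))\n",
- "    print(glob('/output/datasink_handson/preproc/art.sub-*_outliers.txt'))\n",
- "\n",
- "Empty lists here mean that the preprocessing outputs are missing and the analysis workflow would crash at runtime."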
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Connect SelectFiles node to the other nodes here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "analysis1st.connect([(sf, gunzip_anat, [('anat', 'in_file')]),\n", - " (sf, gunzip_func, [('func', 'in_file')]),\n", - " (gunzip_anat, normalize, [('out_file', 'image_to_align')]),\n", - " (gunzip_func, modelspec, [('out_file', 'functional_runs')]),\n", - " (sf, modelspec, [('mc_param', 'realignment_parameters'),\n", - " ('outliers', 'outlier_files'),\n", - " ])\n", - " ])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Data output with `DataSink`\n", - "\n", - "Now, before we run the workflow, let's again specify a `Datasink` folder to only keep those files that we want to keep." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nipype.interfaces.io import DataSink" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Initiate DataSink node here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "# Initiate the datasink node\n", - "output_folder = 'datasink_handson'\n", - "datasink = Node(DataSink(base_directory='/output/',\n", - " container=output_folder),\n", - " name=\"datasink\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Use the following substitutions for the DataSink output\n", - "substitutions = [('_subj_id_', 'sub-')]\n", - "datasink.inputs.substitutions = substitutions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now the next step is to specify all the output that we want to keep in our output folder `output`. Probably best to keep are the:\n", - "- SPM.mat file and the spmT and spmF files from the contrast estimation node\n", - "- normalized betas and anatomy" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Connect nodes to datasink here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "analysis1st.connect([(level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),\n", - " ('spmT_images', '1stLevel.@T'),\n", - " ('spmF_images', '1stLevel.@F'),\n", - " ]),\n", - " (normalize, datasink, [('normalized_files', 'normalized.@files'),\n", - " ('normalized_image', 'normalized.@image'),\n", - " ]),\n", - " ])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualize the workflow\n", - "\n", - "Now that the workflow is finished, let's visualize it again." 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Create 1st-level analysis output graph\n",
- "analysis1st.write_graph(graph2use='colored', format='png', simple_form=True)\n",
- "\n",
- "# Visualize the graph\n",
- "from IPython.display import Image\n",
- "Image(filename='/output/work_1st/graph.png')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Run the Workflow\n",
- "\n",
- "Now that everything is ready, we can run the 1st-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "scrolled": true
- },
- "outputs": [],
- "source": [
- "analysis1st.run('MultiProc', plugin_args={'n_procs': 4})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Visualize results"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "%matplotlib inline\n",
- "import numpy as np\n",
- "from matplotlib import pyplot as plt"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "First, let's look at the 1st-level Design Matrix of one subject, to verify that everything is as it should be."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from scipy.io import loadmat\n",
- "\n",
- "# Using scipy's loadmat function we can access SPM.mat\n",
- "spmmat = loadmat('/output/datasink_handson/1stLevel/sub-07/SPM.mat',\n",
- " struct_as_record=False)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The design matrix and the names of the regressors are a bit hidden in the `spmmat` variable, but they can be accessed as follows:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "designMatrix = spmmat['SPM'][0][0].xX[0][0].X\n",
- "names = [i[0] for i in spmmat['SPM'][0][0].xX[0][0].name[0]]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now before we can plot it, we just need to normalize the design matrix in such a way that each column has a maximum amplitude of 1. This is just for visualization purposes, otherwise the rotation parameters with their rather small values will not show up in the figure."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "normed_design = designMatrix / np.abs(designMatrix).max(axis=0)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "And we're ready to plot the design matrix."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "fig, ax = plt.subplots(figsize=(8, 8))\n",
- "plt.imshow(normed_design, aspect='auto', cmap='gray', interpolation='none')\n",
- "ax.set_ylabel('Volume id')\n",
- "ax.set_xticks(np.arange(len(names)))\n",
- "ax.set_xticklabels(names, rotation=90);"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now that we're happy with the design matrix, let's look how well the normalization worked."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import nibabel as nb\n", - "from nilearn.plotting import plot_anat\n", - "from nilearn.plotting import plot_glass_brain" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load GM probability map of TPM.nii\n", - "img = nb.load('/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii')\n", - "GM_template = nb.Nifti1Image(img.get_data()[..., 0], img.affine, img.header)\n", - "\n", - "# Plot normalized subject anatomy\n", - "display = plot_anat('/output/datasink_handson/normalized/sub-07/wsub-07_ses-test_T1w.nii',\n", - " dim=-0.1)\n", - "\n", - "# Overlay in edges GM map\n", - "display.add_edges(GM_template)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's look at the contrasts of one subject that we've just computed. In particular the F-contrast." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wess_0008.nii',\n", - " colorbar=True, display_mode='lyrz', black_bg=True, threshold=25,\n", - " title='subject 7 - F-contrast: Activation');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wess_0009.nii',\n", - " colorbar=True, display_mode='lyrz', black_bg=True, threshold=25,\n", - " title='subject 7 - F-contrast: Differences');" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 2nd-level Analysis Workflow Structure\n", - "\n", - "Last but not least, the group level analysis. This example will also directly include thresholding of the output, as well as some visualization." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Imports\n", - "\n", - "To make sure that the necessary imports are done, here they are again:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get the Node and Workflow object\n", - "from nipype import Node, Workflow\n", - "\n", - "# Specify which SPM to use\n", - "from nipype.interfaces.matlab import MatlabCommand\n", - "MatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create Nodes and Workflow connections\n", - "\n", - "Now we should know this part very well.\n", - "\n", - "### Workflow for the 2nd-level analysis" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Create the workflow here\n", - "# Hint: use 'base_dir' to specify where to store the working directory" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "analysis2nd = Workflow(name='work_2nd', base_dir='/output/')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2nd-Level Design\n", - "\n", - "This step depends on your study design and the tests you want to perform. 
If you're using SPM to do the group analysis, you have the liberty to choose between a factorial design, a multiple regression design, a one sample T-Test design, a paired T-Test design or a two sample T-Test design.\n",
- "\n",
- "For the current example, we will be using a one sample T-Test design."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.spm import OneSampleTTestDesign"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate the OneSampleTTestDesign node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "onesamplettestdes = Node(OneSampleTTestDesign(), name=\"onesampttestdes\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The next two steps are the same as for the 1st-level design, i.e. estimation of the model followed by estimation of the contrasts."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.spm import EstimateModel, EstimateContrast"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate the EstimateModel and the EstimateContrast node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n",
- " name=\"level2estimate\")\n",
- "\n",
- "level2conestimate = Node(EstimateContrast(group_contrast=True),\n",
- " name=\"level2conestimate\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "To finish the `EstimateContrast` node, we also need to specify which contrast should be computed. For a 2nd-level one sample t-test design, this is rather straightforward:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "cont01 = ['Group', 'T', ['mean'], [1]]\n",
- "level2conestimate.inputs.contrasts = [cont01]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, let's connect those three design nodes to each other."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Connect OneSampleTTestDesign, EstimateModel and EstimateContrast here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "analysis2nd.connect([(onesamplettestdes, level2estimate, [('spm_mat_file',\n",
- " 'spm_mat_file')]),\n",
- " (level2estimate, level2conestimate, [('spm_mat_file',\n",
- " 'spm_mat_file'),\n",
- " ('beta_images',\n",
- " 'beta_images'),\n",
- " ('residual_image',\n",
- " 'residual_image')])\n",
- " ])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Thresholding of output contrast\n",
- "\n",
- "And to close, we will use SPM's `Threshold`. With this routine, we can set a specific voxel threshold (i.e. *p*<0.001) and apply an FDR cluster threshold (i.e. 
*p*<0.05).\n",
- "\n",
- "As we only have 6 subjects, I recommend setting the voxel threshold to 0.01 and leaving the cluster threshold at 0.05."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.spm import Threshold"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "level2thresh = Node(Threshold(contrast_index=1,\n",
- " use_topo_fdr=True,\n",
- " use_fwe_correction=False,\n",
- " extent_threshold=0,\n",
- " height_threshold=0.01,\n",
- " height_threshold_type='p-value',\n",
- " extent_fdr_p_threshold=0.05),\n",
- " name=\"level2thresh\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Connect the Threshold node to the EstimateContrast node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "analysis2nd.connect([(level2conestimate, level2thresh, [('spm_mat_file',\n",
- " 'spm_mat_file'),\n",
- " ('spmT_images',\n",
- " 'stat_image'),\n",
- " ])\n",
- " ])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Gray Matter Mask\n",
- "\n",
- "We could run our 2nd-level workflow as it is. All the major nodes are there. But I nonetheless suggest that we use a gray matter mask to restrict the analysis to only gray matter voxels.\n",
- "\n",
- "In the 1st-level analysis, we normalized to SPM12's `TPM.nii` tissue probability atlas. Therefore, we could just take the gray matter probability map of this `TPM.nii` image (the first volume) and threshold it at a certain probability value to get a binary mask. This can of course also all be done in Nipype, but sometimes the direct bash code is quicker:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "%%bash\n",
- "TEMPLATE='/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'\n",
- "\n",
- "# Extract the first volume with `fslroi`\n",
- "fslroi $TEMPLATE GM_PM.nii.gz 0 1\n",
- "\n",
- "# Threshold the probability mask at 10%\n",
- "fslmaths GM_PM.nii -thr 0.10 -bin /output/datasink_handson/GM_mask.nii.gz\n",
- "\n",
- "# Unzip the mask and delete the GM_PM.nii.gz file\n",
- "gunzip /output/datasink_handson/GM_mask.nii.gz\n",
- "rm GM_PM.nii.gz"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's take a look at this mask:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import nibabel as nb\n",
- "mask = nb.load('/output/datasink_handson/GM_mask.nii')\n",
- "mask.orthoview()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we just need to specify this binary mask as an `explicit_mask_file` for the one sample T-test node."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "onesamplettestdes.inputs.explicit_mask_file = '/output/datasink_handson/GM_mask.nii'"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Datainput with `SelectFiles` and `iterables` \n",
- "\n",
- "We will again be using [`SelectFiles`](../../../nipype_tutorial/notebooks/basic_data_input.ipynb#SelectFiles) and [`iterables`](../../../nipype_tutorial/notebooks/basic_iteration.ipynb).\n",
- "\n",
- "So, what do we need? 
Actually, just the 1st-level contrasts of all subjects, separated by contrast number."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Import the SelectFiles\n",
- "from nipype import SelectFiles\n",
- "\n",
- "# String template with {}-based strings\n",
- "templates = {'cons': '/output/datasink_handson/normalized/sub-*/w*_{cont_id}.nii'}\n",
- "\n",
- "# Create SelectFiles node\n",
- "sf = Node(SelectFiles(templates, sort_filelist=True),\n",
- " name='selectfiles')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "We are using `*` to tell `SelectFiles` that it can grab all available subjects and any contrast with a specific contrast id, independent of whether it's a T-contrast (`con`) or an F-contrast (`ess`).\n",
- "\n",
- "So, let's specify over which contrasts the workflow should iterate."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# list of contrast identifiers\n",
- "contrast_id_list = ['0001', '0002', '0003', '0004', '0005',\n",
- " '0006', '0007', '0008', '0009']\n",
- "sf.iterables = [('cont_id', contrast_id_list)]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now we need to connect the `SelectFiles` to the `OneSampleTTestDesign` node."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "analysis2nd.connect([(sf, onesamplettestdes, [('cons', 'in_files')])])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Data output with `DataSink`\n",
- "\n",
- "Now, before we run the workflow, let's again specify a `Datasink` folder to only keep those files that we want to keep."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nipype.interfaces.io import DataSink"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden",
- "solution2_first": true
- },
- "outputs": [],
- "source": [
- "# Initiate DataSink node here"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "solution2": "hidden"
- },
- "outputs": [],
- "source": [
- "# Initiate the datasink node\n",
- "output_folder = 'datasink_handson'\n",
- "datasink = Node(DataSink(base_directory='/output/',\n",
- " container=output_folder),\n",
- " name=\"datasink\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "## Use the following substitutions for the DataSink output\n",
- "substitutions = [('_cont_id_', 'con_')]\n",
- "datasink.inputs.substitutions = substitutions"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now the next step is to specify all the output that we want to keep in our output folder `output`. 
Probably best to keep are the:\n", - "- the SPM.mat file and the spmT images from the `EstimateContrast` node\n", - "- the thresholded spmT images from the `Threshold` node" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden", - "solution2_first": true - }, - "outputs": [], - "source": [ - "# Connect nodes to datasink here" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "solution2": "hidden" - }, - "outputs": [], - "source": [ - "analysis2nd.connect([(level2conestimate, datasink, [('spm_mat_file',\n", - " '2ndLevel.@spm_mat'),\n", - " ('spmT_images',\n", - " '2ndLevel.@T'),\n", - " ('con_images',\n", - " '2ndLevel.@con')]),\n", - " (level2thresh, datasink, [('thresholded_map',\n", - " '2ndLevel.@threshold')])\n", - " ])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Visualize the workflow\n", - "\n", - "And we're good to go. Let's first take a look at the workflow." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create 1st-level analysis output graph\n", - "analysis2nd.write_graph(graph2use='colored', format='png', simple_form=True)\n", - "\n", - "# Visualize the graph\n", - "from IPython.display import Image\n", - "Image(filename='/output/work_2nd/graph.png')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run the Workflow\n", - "\n", - "Now that everything is ready, we can run the 2nd-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "analysis2nd.run('MultiProc', plugin_args={'n_procs': 4})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Visualize results\n", - "\n", - "Let's take a look at the results. Keep in mind that we only have *`N=6`* subjects and that we set the voxel threshold to a very liberal `p<0.01`. Interpretation of the results should therefore be taken with a lot of caution." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nilearn.plotting import plot_glass_brain\n", - "%matplotlib inline\n", - "out_path = '/output/datasink_handson/2ndLevel/'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0001/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='average (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0002/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Finger (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0003/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Foot (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0004/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Lips (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0005/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Finger < others (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0006/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Foot < others (FDR corrected)');" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_glass_brain(out_path + 'con_0007/spmT_0001_thr.nii', display_mode='lyrz',\n", - " black_bg=True, colorbar=True, title='Lips > others (FDR corrected)');" - ] - } - ], - "metadata": { - "anaconda-cloud": {}, - "kernelspec": { - "display_name": "Python [default]", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 2f7f17bf0a902e273839f0c0dc8eaf61eee13351 Mon Sep 17 00:00:00 2001 From: ccbrain Date: Wed, 12 Sep 2018 23:26:57 +0100 Subject: [PATCH 7/7] Add files via upload --- tutorial_analysis.ipynb | 546 +++++++++++++++++++++ tutorial_preprocessing.ipynb | 885 +++++++++++++++++++++++++++++++++++ 2 files changed, 1431 insertions(+) create mode 100644 tutorial_analysis.ipynb create mode 100644 tutorial_preprocessing.ipynb diff --git a/tutorial_analysis.ipynb b/tutorial_analysis.ipynb new file mode 100644 index 0000000..4a26233 --- /dev/null +++ b/tutorial_analysis.ipynb @@ -0,0 +1,546 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Hands-on 2: How to create a fMRI analysis workflow\n", + "\n", + "The purpose of this section is that you setup a fMRI analysis workflow. 
\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 1st-level Analysis Workflow Structure\n",
+ "\n",
+ "In this notebook we will create a workflow that performs 1st-level analysis and normalizes the resulting beta weights to the MNI template. In concrete steps this means:\n",
+ "\n",
+ " 1. Specify 1st-level model parameters\n",
+ " 2. Specify 1st-level contrasts\n",
+ " 3. Estimate 1st-level contrasts\n",
+ " 4. Normalize 1st-level contrasts"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Imports\n",
+ "\n",
+ "It's always best to have all relevant module imports at the beginning of your script. So let's import what we most certainly need."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nilearn import plotting\n",
+ "%matplotlib inline\n",
+ "\n",
+ "# Get the Node and Workflow object\n",
+ "from nipype import Node, Workflow\n",
+ "\n",
+ "# Specify which SPM to use\n",
+ "from nipype.interfaces.matlab import MatlabCommand\n",
+ "MatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create Nodes and Workflow connections\n",
+ "\n",
+ "Let's create all the nodes that we need! Make sure to specify all relevant inputs and keep in mind which ones you will later need to connect in your pipeline.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Specify 1st-level model parameters (stimuli onsets, duration, etc.)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To specify the 1st-level model, we need the subject-specific onset times and durations of the stimuli. Luckily, as we are working with a BIDS dataset, this information is nicely stored in a `tsv` file:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "# Create the workflow here\n",
+ "analysis1st = Workflow(name='work_1st', base_dir='/output/')\n",
+ "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
+ "trialinfo\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "from nipype.interfaces.base import Bunch\n",
+ "for group in trialinfo.groupby('trial_type'):\n",
+ " print(group)\n",
+ " print(\"\")\n",
+ "trialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\n",
+ "conditions = []\n",
+ "onsets = []\n",
+ "durations = []\n",
+ "\n",
+ "for group in trialinfo.groupby('trial_type'):\n",
+ " conditions.append(group[0])\n",
+ " onsets.append(list(group[1].onset -10)) # subtracting 10s due to the removal of 4 dummy scans\n",
+ " durations.append(group[1].duration.tolist())\n",
+ "\n",
+ "subject_info = [Bunch(conditions=conditions,\n",
+ " onsets=onsets,\n",
+ " durations=durations,\n",
+ " )]\n",
+ "\n",
+ "from nipype.algorithms.modelgen import SpecifySPMModel\n",
+ "\n",
+ "# Initiate the SpecifySPMModel node here\n",
+ "modelspec = Node(SpecifySPMModel(concatenate_runs=False,\n",
+ " input_units='secs',\n",
+ " output_units='secs',\n",
+ " time_repetition=2.5,\n",
+ " high_pass_filter_cutoff=128,\n",
+ " subject_info=subject_info),\n",
+ " name=\"modelspec\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This node will also need some additional inputs, such as the preprocessed functional images, 
the motion parameters etc. We will specify those once we take care of the workflow data input stream." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Specify 1st-level contrasts\n", + "\n", + "To do any GLM analysis, we need to also define the contrasts that we want to investigate. If we recap, we had three different conditions in the **fingerfootlips** task in this dataset:\n", + "\n", + "- **finger**\n", + "- **foot**\n", + "- **lips**\n", + "\n", + "Therefore, we could create the following contrasts (seven T-contrasts and two F-contrasts):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Condition names\n", + "condition_names = ['Finger', 'Foot', 'Lips']\n", + "\n", + "# Contrasts\n", + "cont01 = ['average', 'T', condition_names, [1/3., 1/3., 1/3.]]\n", + "cont02 = ['Finger', 'T', condition_names, [1, 0, 0]]\n", + "cont03 = ['Foot', 'T', condition_names, [0, 1, 0]]\n", + "cont04 = ['Lips', 'T', condition_names, [0, 0, 1]]\n", + "cont05 = ['Finger > others','T', condition_names, [1, -0.5, -0.5]]\n", + "cont06 = ['Foot > others', 'T', condition_names, [-0.5, 1, -0.5]]\n", + "cont07 = ['Lips > others', 'T', condition_names, [-0.5, -0.5, 1]]\n", + "\n", + "cont08 = ['activation', 'F', [cont02, cont03, cont04]]\n", + "cont09 = ['differences', 'F', [cont05, cont06, cont07]]\n", + "\n", + "contrast_list = [cont01, cont02, cont03, cont04, cont05, cont06, cont07, cont08, cont09]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Estimate 1st-level contrasts\n", + "\n", + "Before we can estimate the 1st-level contrasts, we first need to create the 1st-level design. Here you can also specify what kind of basis function you want (HRF, FIR, Fourier, etc.), if you want to use time and dispersion derivatives and how you want to model the serial correlation.\n", + "\n", + "In this example I propose that you use an HRF basis function, that we model time derivatives and that we model the serial correlation with AR(1)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from nipype.interfaces.spm import Level1Design\n", + "# Initiate the Level1Design node here\n", + "level1design = Node(Level1Design(bases={'hrf': {'derivs': [1, 0]}},\n", + " timing_units='secs',\n", + " interscan_interval=2.5,\n", + " model_serial_correlations='AR(1)'),\n", + " name=\"level1design\")\n", + "\n", + "\n", + "# Now that we have the Model Specification and 1st-Level Design node, we can connect them to each other:\n", + "# Connect the two nodes here\n", + "analysis1st.connect([(modelspec, level1design, [('session_info',\n", + " 'session_info')])])\n", + "\n", + "# Now we need to estimate the model. 
I recommend that you use a Classical: 1 method to estimate the model.\n",
+ "from nipype.interfaces.spm import EstimateModel\n",
+ "# Initiate the EstimateModel node here\n",
+ "level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n",
+ " name=\"level1estimate\")\n",
+ "\n",
+ "# Now we can connect the 1st-Level Design node with the model estimation node.\n",
+ "# Connect the two nodes here\n",
+ "analysis1st.connect([(level1design, level1estimate, [('spm_mat_file',\n",
+ " 'spm_mat_file')])])\n",
+ "from nipype.interfaces.spm import EstimateContrast\n",
+ "# Initiate the EstimateContrast node here\n",
+ "level1conest = Node(EstimateContrast(contrasts=contrast_list),\n",
+ " name=\"level1conest\")\n",
+ "\n",
+ "# Now we can connect the model estimation node with the contrast estimation node.\n",
+ "analysis1st.connect([(level1estimate, level1conest, [('spm_mat_file',\n",
+ " 'spm_mat_file'),\n",
+ " ('beta_images',\n",
+ " 'beta_images'),\n",
+ " ('residual_image',\n",
+ " 'residual_image')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Normalize 1st-level contrasts\n",
+ "\n",
+ "Now that the contrasts were estimated in subject space we can put them into a common reference space by normalizing them to a specific template. In this case we will be using SPM12's Normalize routine and normalize to the SPM12 tissue probability map `TPM.nii`.\n",
+ "\n",
+ "At this step you can also specify the voxel resolution of the output volumes. If you don't specify it, it will normalize to a voxel resolution of 2x2x2mm. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.spm import Normalize12\n",
+ "\n",
+ "# Location of the template\n",
+ "template = '/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'\n",
+ "\n",
+ "# Initiate the Normalize12 node here\n",
+ "normalize = Node(Normalize12(jobtype='estwrite',\n",
+ " tpm=template,\n",
+ " write_voxel_sizes=[2, 2, 2]\n",
+ " ),\n",
+ " name=\"normalize\")\n",
+ "\n",
+ "# Connect the nodes here\n",
+ "analysis1st.connect([(level1conest, normalize, [('spmT_images',\n",
+ " 'apply_to_files')])\n",
+ " ])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Datainput with `SelectFiles` and `iterables` "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import the SelectFiles\n",
+ "from nipype import SelectFiles\n",
+ "\n",
+ "# String template with {}-based strings\n",
+ "templates = {'anat': '/data/ds000114/sub-{subj_id}/ses-test/anat/sub-{subj_id}_ses-test_T1w.nii.gz',\n",
+ " 'func': '/output/datasink_handson/preproc/sub-{subj_id}_detrend.nii.gz',\n",
+ " 'mc_param': '/output/datasink_handson/preproc/sub-{subj_id}.par',\n",
+ " 'outliers': '/output/datasink_handson/preproc/art.sub-{subj_id}_outliers.txt'\n",
+ " }\n",
+ "\n",
+ "# Create SelectFiles node\n",
+ "sf = Node(SelectFiles(templates, sort_filelist=True),\n",
+ " name='selectfiles')\n",
+ "\n",
+ "# Now we can specify over which subjects the workflow should iterate. 
\n", + "# list of subject identifiers\n", + "subject_list = ['07']\n", + "sf.iterables = [('subj_id', subject_list)]\n", + "\n", + "\n", + "# Gunzip Node\n", + "\n", + "from nipype.algorithms.misc import Gunzip\n", + "# Initiate the two Gunzip node here\n", + "gunzip_anat = Node(Gunzip(), name='gunzip_anat')\n", + "gunzip_func = Node(Gunzip(), name='gunzip_func')\n", + "\n", + "\n", + "# And as a final step, we just need to connect this SelectFiles node to the rest of the workflow.\n", + "# Connect SelectFiles node to the other nodes here\n", + "analysis1st.connect([(sf, gunzip_anat, [('anat', 'in_file')]),\n", + " (sf, gunzip_func, [('func', 'in_file')]),\n", + " (gunzip_anat, normalize, [('out_file', 'image_to_align')]),\n", + " (gunzip_func, modelspec, [('out_file', 'functional_runs')]),\n", + " (sf, modelspec, [('mc_param', 'realignment_parameters'),\n", + " ('outliers', 'outlier_files'),\n", + " ])\n", + " ])\n", + "\n", + "#Data output with DataSink\n", + "#Now, before we run the workflow, let's again specify a Datasink folder to only keep those files that we want to keep.\n", + "from nipype.interfaces.io import DataSink\n", + "# Initiate DataSink node here\n", + "# Initiate the datasink node\n", + "output_folder = 'datasink_handson'\n", + "datasink = Node(DataSink(base_directory='/output/',\n", + " container=output_folder),\n", + " name=\"datasink\")\n", + "## Use the following substitutions for the DataSink output\n", + "substitutions = [('_subj_id_', 'sub-')]\n", + "datasink.inputs.substitutions = substitutions\n", + "\n", + "# Connect nodes to datasink here\n", + "analysis1st.connect([(level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),\n", + " ('spmT_images', '1stLevel.@T'),\n", + " ('spmF_images', '1stLevel.@F'),\n", + " ]),\n", + " (normalize, datasink, [('normalized_files', 'normalized.@files'),\n", + " ('normalized_image', 'normalized.@image'),\n", + " ]),\n", + " ])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize the workflow\n", + "\n", + "Now that the workflow is finished, let's visualize it again." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create 1st-level analysis output graph\n", + "analysis1st.write_graph(graph2use='colored', format='png', simple_form=True)\n", + "\n", + "# Visualize the graph\n", + "from IPython.display import Image\n", + "Image(filename='/output/work_1st/graph.png')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run the Workflow\n", + "\n", + "Now that everything is ready, we can run the 1st-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "analysis1st.run('MultiProc', plugin_args={'n_procs': 4})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### First, let's look at the 1st-level Design Matrix of subject one, to verify that everything is as it should be." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "from scipy.io import loadmat\n", + "\n", + "# Using scipy's loadmat function we can access SPM.mat\n", + "spmmat = loadmat('/output/datasink_handson/1stLevel/sub-07/SPM.mat',\n", + " struct_as_record=False)\n", + "designMatrix = spmmat['SPM'][0][0].xX[0][0].X\n", + "names = [i[0] for i in spmmat['SPM'][0][0].xX[0][0].name[0]]\n", + "normed_design = designMatrix / np.abs(designMatrix).max(axis=0)\n", + "fig, ax = plt.subplots(figsize=(8, 8))\n", + "plt.imshow(normed_design, aspect='auto', cmap='gray', interpolation='none')\n", + "ax.set_ylabel('Volume id')\n", + "ax.set_xticks(np.arange(len(names)))\n", + "ax.set_xticklabels(names, rotation=90);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Let's look how well the normalization worked." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import nibabel as nb\n", + "from nilearn.plotting import plot_anat\n", + "from nilearn.plotting import plot_glass_brain\n", + "# Load GM probability map of TPM.nii\n", + "img = nb.load('/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii')\n", + "GM_template = nb.Nifti1Image(img.get_data()[..., 0], img.affine, img.header)\n", + "\n", + "# Plot normalized subject anatomy\n", + "display = plot_anat('/output/datasink_handson/normalized/sub-07/wsub-07_ses-test_T1w.nii',\n", + " dim=-0.1)\n", + "\n", + "# Overlay in edges GM map\n", + "display.add_edges(GM_template)\n", + "\n", + "# Plot raw subject anatomy\n", + "display = plot_anat('/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz',\n", + " dim=-0.1)\n", + "\n", + "# Overlay in edges GM map\n", + "display.add_edges(GM_template)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Let's look at the contrasts of one subject that we've just computed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from nilearn.plotting import plot_stat_map\n", + "anatimg = '/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz'\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0001.nii', title='average',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0002.nii', title='finger',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0003.nii', title='foot',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0004.nii', title='lip',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### We can also check three additional contrasts Finger > others, Foot > others and Lips > others." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0005.nii', title='fingers > other',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0006.nii', title='foot > other',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\n", + "plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0007.nii', title='lip > other',\n", + " bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### We can plot the normalized results over a template brain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0005.nii',\n", + " colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n", + " title='fingers>other');\n", + "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0006.nii',\n", + " colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n", + " title='foot>other');\n", + "plot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0007.nii',\n", + " colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n", + " title='lip>other');" + ] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python [default]", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tutorial_preprocessing.ipynb b/tutorial_preprocessing.ipynb new file mode 100644 index 0000000..2f03642 --- /dev/null +++ b/tutorial_preprocessing.ipynb @@ -0,0 +1,885 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Demo: FMRI preprocessing and simple analysis" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preparation\n", + "\n", + "Before we can start with anything we first need to download the data. \n", + "We will use structural and functional data from a single right-handed subject.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "datalad get -J 4 /data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz \\\n", + " /data/ds000114/sub-07/ses-test/func/*fingerfootlips*" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Imports\n", + "It's always best to have all relevant module imports at the beginning of your script. So let's import what we most certainly need." 
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Get the Node and Workflow object\n",
+ "from nipype import Node, Workflow\n",
+ "\n",
+ "# Specify which SPM to use\n",
+ "from nipype.interfaces.matlab import MatlabCommand\n",
+ "MatlabCommand.set_default_paths('/opt/spm12/spm12_mcr/spm/spm12')\n",
+ "\n",
+ "from nilearn import image as nli\n",
+ "from nilearn.image.image import mean_img\n",
+ "from nilearn.plotting import plot_epi, plot_anat\n",
+ "from nipype.algorithms.misc import Gunzip\n",
+ "import nibabel as nb\n",
+ "import matplotlib.pyplot as plt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Overview of the data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Plot anatomical and functional data\n",
+ "%matplotlib inline\n",
+ "plot_anat('/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz',draw_cross=True,vmax=900,vmin=200,cut_coords=[0, 0, 0])\n",
+ "plot_epi(mean_img('/data/ds000114/sub-07/ses-test/func/sub-07_ses-test_task-fingerfootlips_bold.nii.gz'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Preprocessing Workflow Structure\n",
+ "\n",
+ "For the preprocessing workflow, we use the following nodes:\n",
+ "\n",
+ " 1. Gunzip (Nipype)\n",
+ " 2. Drop Dummy Scans (FSL)\n",
+ " 3. Slice Time Correction (SPM)\n",
+ " 4. Motion Correction (FSL)\n",
+ " 5. Artifact Detection\n",
+ " 6. Segmentation (SPM)\n",
+ " 7. Coregistration (FSL)\n",
+ " 8. Smoothing (FSL)\n",
+ " 9. Apply Binary Mask (FSL)\n",
+ " 10. Remove Linear Trends (Nipype)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Create Nodes and Workflow connections\n",
+ "\n",
+ "Let's create all the nodes that we need! \n",
+ "\n",
+ "### Workflow\n",
+ "We first need to create a workflow:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the workflow here\n",
+ "preproc = Workflow(name='work_preproc', base_dir='/output/')\n",
+ "\n",
+ "# Specify example input file\n",
+ "func_file = '/data/ds000114/sub-07/ses-test/func/sub-07_ses-test_task-fingerfootlips_bold.nii.gz'\n",
+ "\n",
+ "# Initiate Gunzip node\n",
+ "gunzip_func = Node(Gunzip(in_file=func_file), name='gunzip_func')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Drop Dummy Scans\n",
+ "\n",
+ "The functional images of this dataset were recorded with 4 dummy scans at the beginning (see the [corresponding publication](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3641991/)). But those dummy scans were not yet taken out from the functional images.\n",
+ "\n",
+ "To better illustrate this, let's plot the time course of a random voxel of the just defined `func_file`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "plt.plot(nb.load(func_file).get_fdata()[32, 32, 15, :]);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In the figure above, we see that at the very beginning there are extreme values, which hint at the fact that steady state wasn't reached yet. Therefore, we want to exclude the dummy scans from the original data. This can be achieved with FSL's `ExtractROI`.",
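+ "\n",
+ "You can also see this numerically (a minimal, optional sketch reusing `func_file` and the imports from above) by comparing the global mean of the first volumes against the rest:\n",
+ "\n",
+ "    import numpy as np\n",
+ "    data = nb.load(func_file).get_fdata()\n",
+ "    vol_means = data.mean(axis=(0, 1, 2))\n",
+ "    print(vol_means[:6])  # the first 4 volumes should clearly stand out\n",
+ "\n",
+ "If the first four values are far away from the later ones, dropping 4 volumes with `t_min=4` below is justified."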
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.fsl import ExtractROI\n",
+ "extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),\n",
+ " name=\"extract\")\n",
+ "\n",
+ "# This ExtractROI node can now be connected to the gunzip_func node from above. To do this, we use the following command:\n",
+ "preproc.connect([(gunzip_func, extract, [('out_file', 'in_file')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Slice Time Correction\n",
+ "\n",
+ "Now on to the next step. Let's use SPM's `SliceTiming` to correct for the slice-wise acquisition of the volumes. As a reminder, the tutorial dataset was recorded...\n",
+ "- with a time repetition (TR) of 2.5 seconds\n",
+ "- with 30 slices per volume\n",
+ "- in an interleaved fashion, i.e. slice order is [1, 3, 5, 7, ..., 2, 4, 6, ..., 30]\n",
+ "- with a time acquisition (TA) of 2.4167 seconds, i.e. `TR-(TR/num_slices)`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.spm import SliceTiming\n",
+ "slice_order = list(range(1, 31, 2)) + list(range(2, 31, 2))\n",
+ "print(slice_order)\n",
+ "\n",
+ "# Initiate SliceTiming node here\n",
+ "slicetime = Node(SliceTiming(num_slices=30,\n",
+ " ref_slice=15,\n",
+ " slice_order=slice_order,\n",
+ " time_repetition=2.5,\n",
+ " time_acquisition=2.5-(2.5/30)),\n",
+ " name='slicetime')\n",
+ "\n",
+ "# Connect SliceTiming node to the other nodes here\n",
+ "preproc.connect([(extract, slicetime, [('roi_file', 'in_files')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Motion Correction\n",
+ "\n",
+ "To correct for motion in the scanner, we will be using FSL's `MCFLIRT`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.fsl import MCFLIRT\n",
+ "\n",
+ "# Initiate MCFLIRT node here\n",
+ "mcflirt = Node(MCFLIRT(mean_vol=True,\n",
+ " save_plots=True),\n",
+ " name=\"mcflirt\")\n",
+ "\n",
+ "# Connect the MCFLIRT node to the rest of the workflow.\n",
+ "preproc.connect([(slicetime, mcflirt, [('timecorrected_files', 'in_file')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Artifact Detection\n",
+ "\n",
+ "We will use the really cool and useful `ArtifactDetect` tool from Nipype to detect motion and intensity outliers in the functional images.\n",
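+ "\n",
+ "For reference: once the full workflow has run, `art` will have produced a plain text file listing the indices of the flagged outlier volumes (stored by the datasink as e.g. `/output/datasink_handson/preproc/art.sub-07_outliers.txt`; the exact path depends on your datasink setup). A minimal sketch to inspect it:\n",
+ "\n",
+ "    import numpy as np\n",
+ "    outliers = np.atleast_1d(np.loadtxt('/output/datasink_handson/preproc/art.sub-07_outliers.txt'))\n",
+ "    print(len(outliers), 'outlier volumes:', outliers)\n",
+ "\n",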
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Artifact Detection\n",
+ "\n",
+ "We will use the really cool and useful `ArtifactDetect` tool from Nipype to detect motion and intensity outliers in the functional images. The interface is initiated as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.algorithms.rapidart import ArtifactDetect\n",
+ "art = Node(ArtifactDetect(norm_threshold=2,\n",
+ "                          zintensity_threshold=3,\n",
+ "                          mask_type='spm_global',\n",
+ "                          parameter_source='FSL',\n",
+ "                          use_differences=[True, False],\n",
+ "                          plot_type='svg'),\n",
+ "           name=\"art\")\n",
+ "\n",
+ "preproc.connect([(mcflirt, art, [('out_file', 'realigned_files'),\n",
+ "                                 ('par_file', 'realignment_parameters')])\n",
+ "                 ])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The parameters above mean the following:\n",
+ "- `norm_threshold` - Threshold used to detect motion-related outliers when composite motion is being used\n",
+ "- `zintensity_threshold` - Intensity Z-threshold used to detect images that deviate from the mean\n",
+ "- `mask_type` - Type of mask that should be used to mask the functional data; *spm_global* uses an spm_global-like calculation to determine the brain mask\n",
+ "- `parameter_source` - Source of the movement parameters\n",
+ "- `use_differences` - Whether to use differences between successive motion (first element) and intensity (second element) estimates to determine outliers"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Segmentation of anatomical image\n",
+ "\n",
+ "Now let's work on the anatomical image. In particular, let's use SPM's `NewSegment` to create probability maps for gray matter, white matter, and CSF."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.spm import NewSegment\n",
+ "# Use the following tissue specification to get a GM and WM probability map.\n",
+ "# Each tuple reads: ((TPM file, 1-based tissue index), number of Gaussians,\n",
+ "# (save native-space map?, save DARTEL map?), (save unmodulated warped?, save modulated warped?))\n",
+ "tpm_img = '/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'\n",
+ "tissue1 = ((tpm_img, 1), 1, (True, False), (False, False))\n",
+ "tissue2 = ((tpm_img, 2), 1, (True, False), (False, False))\n",
+ "tissue3 = ((tpm_img, 3), 2, (True, False), (False, False))\n",
+ "tissue4 = ((tpm_img, 4), 3, (False, False), (False, False))\n",
+ "tissue5 = ((tpm_img, 5), 4, (False, False), (False, False))\n",
+ "tissue6 = ((tpm_img, 6), 2, (False, False), (False, False))\n",
+ "tissues = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6]\n",
+ "\n",
+ "# Initiate NewSegment node here\n",
+ "segment = Node(NewSegment(tissues=tissues), name='segment')\n",
+ "\n",
+ "# Specify example input file\n",
+ "anat_file = '/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz'\n",
+ "\n",
+ "# Initiate Gunzip node\n",
+ "gunzip_anat = Node(Gunzip(in_file=anat_file), name='gunzip_anat')\n",
+ "\n",
+ "preproc.connect([(gunzip_anat, segment, [('out_file', 'channel_files')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Compute Coregistration Matrix\n",
+ "\n",
+ "As a next step, we will make sure that the functional images are coregistered to the anatomical image. For this we will use FSL's `FLIRT`. As we just created a white matter probability map, we can use it together with the Boundary-Based Registration (BBR) cost function to optimize the image coregistration. Some helpful notes:\n",
+ "- use 6 degrees of freedom\n",
+ "- specify the cost function as `bbr`\n",
+ "- use the `schedule='/usr/share/fsl/5.0/etc/flirtsch/bbr.sch'`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.fsl import FLIRT\n",
+ "\n",
+ "# Initiate FLIRT node here\n",
+ "coreg = Node(FLIRT(dof=6,\n",
+ "                   cost='bbr',\n",
+ "                   schedule='/usr/share/fsl/5.0/etc/flirtsch/bbr.sch',\n",
+ "                   output_type='NIFTI'),\n",
+ "             name=\"coreg\")\n",
+ "\n",
+ "preproc.connect([(gunzip_anat, coreg, [('out_file', 'reference')]),\n",
+ "                 (mcflirt, coreg, [('mean_img', 'in_file')])\n",
+ "                 ])\n",
+ "\n",
+ "\n",
+ "from nipype.interfaces.fsl import Threshold\n",
+ "\n",
+ "# Threshold - Threshold WM probability image\n",
+ "threshold_WM = Node(Threshold(thresh=0.5,\n",
+ "                              args='-bin',\n",
+ "                              output_type='NIFTI'),\n",
+ "                    name=\"threshold_WM\")\n",
+ "\n",
+ "# Select the WM segmentation file from the segmentation output\n",
+ "# (native_class_images is a list with one entry per tissue class,\n",
+ "# each itself a list of files, so files[1][0] is the WM map)\n",
+ "def get_wm(files):\n",
+ "    return files[1][0]\n",
+ "\n",
+ "# Connect the segmentation node with the threshold node\n",
+ "preproc.connect([(segment, threshold_WM, [(('native_class_images', get_wm),\n",
+ "                                           'in_file')])])\n",
+ "\n",
+ "# Now we can connect this Threshold node to the coregistration node from above\n",
+ "preproc.connect([(threshold_WM, coreg, [('out_file', 'wm_seg')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Apply Coregistration Matrix to functional image\n",
+ "\n",
+ "Now that we have the coregistration matrix that correctly overlays the functional mean image on the subject-specific anatomy, we need to apply the coregistration to the whole time series. This can be achieved with FSL's `FLIRT` as follows:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Specify the isometric voxel resolution you want after coregistration\n",
+ "desired_voxel_iso = 4\n",
+ "\n",
+ "# Apply coregistration warp to functional images\n",
+ "applywarp = Node(FLIRT(interp='spline',\n",
+ "                       apply_isoxfm=desired_voxel_iso,\n",
+ "                       output_type='NIFTI'),\n",
+ "                 name=\"applywarp\")\n",
+ "\n",
+ "# Connect the applywarp node to the other nodes\n",
+ "preproc.connect([(mcflirt, applywarp, [('out_file', 'in_file')]),\n",
+ "                 (coreg, applywarp, [('out_matrix_file', 'in_matrix_file')]),\n",
+ "                 (gunzip_anat, applywarp, [('out_file', 'reference')])\n",
+ "                 ])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Smoothing\n",
+ "\n",
+ "The next step is image smoothing. The simplest way to do this is to use FSL's or SPM's `Smooth` function. But for learning purposes, let's use the FSL `SUSAN` workflow as it is implemented in Nipype. Note that this time we are importing a workflow instead of an interface."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.workflows.fmri.fsl.preprocess import create_susan_smooth\n",
+ "\n",
+ "# Initiate SUSAN workflow here\n",
+ "susan = create_susan_smooth(name='susan')\n",
+ "susan.inputs.inputnode.fwhm = 4\n",
+ "\n",
+ "# Connect the applywarp node to the SUSAN workflow here\n",
+ "preproc.connect([(applywarp, susan, [('out_file', 'inputnode.in_files')])])"
+ ]
+ },
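+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`create_susan_smooth` returns a complete sub-workflow rather than a single node. If you are curious what it will execute under the hood, here is a quick optional introspection (a minimal sketch using the workflow's `list_node_names()` method):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# List the nodes contained in the SUSAN sub-workflow\n",
+ "print(susan.list_node_names())"
+ ]
+ },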
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Create Binary Mask\n",
+ "\n",
+ "There are many possible approaches to masking your functional images: you could apply no mask at all, use a simple whole-brain mask, or use a mask that only covers a certain kind of brain tissue, e.g. gray matter.\n",
+ "\n",
+ "For the current example, we want to create a dilated gray matter mask. For this purpose we need to:\n",
+ "1. Resample the gray matter probability map to the same resolution as the functional images\n",
+ "2. Threshold this resampled probability map at a specific value\n",
+ "3. Dilate this mask by some voxels to make the mask less conservative and more inclusive\n",
+ "\n",
+ "The first step can be done in many ways (e.g. using FreeSurfer's `mri_convert` or `nibabel`), but in our case we will use FSL's `FLIRT`. The trick is to use the probability map as both the input file and the reference file."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.fsl import FLIRT\n",
+ "from nipype.interfaces.fsl import Threshold\n",
+ "\n",
+ "# Initiate resample node\n",
+ "resample = Node(FLIRT(apply_isoxfm=desired_voxel_iso,\n",
+ "                      output_type='NIFTI'),\n",
+ "                name=\"resample\")\n",
+ "\n",
+ "\n",
+ "# Threshold - Threshold GM probability image\n",
+ "# ('-bin' binarizes the image, '-dilF' applies a maximum filter, i.e. dilates the mask)\n",
+ "mask_GM = Node(Threshold(thresh=0.5,\n",
+ "                         args='-bin -dilF',\n",
+ "                         output_type='NIFTI'),\n",
+ "               name=\"mask_GM\")\n",
+ "\n",
+ "# Select the GM segmentation file from the segmentation output\n",
+ "def get_gm(files):\n",
+ "    return files[0][0]\n",
+ "\n",
+ "# Now we can connect the resample and gray matter mask nodes to the segmentation node and to each other\n",
+ "preproc.connect([(segment, resample, [(('native_class_images', get_gm), 'in_file'),\n",
+ "                                      (('native_class_images', get_gm), 'reference')\n",
+ "                                      ]),\n",
+ "                 (resample, mask_GM, [('out_file', 'in_file')])\n",
+ "                 ])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Apply the binary mask\n",
+ "\n",
+ "Now we can connect this dilated gray matter mask to the SUSAN workflow, and also apply it to the resulting smoothed images."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Connect the gray matter mask node to the SUSAN workflow here\n",
+ "preproc.connect([(mask_GM, susan, [('out_file', 'inputnode.mask_file')])])\n",
+ "\n",
+ "# To apply the mask to the smoothed functional images, we will use FSL's ApplyMask interface\n",
+ "from nipype.interfaces.fsl import ApplyMask\n",
+ "from nipype import MapNode\n",
+ "\n",
+ "# Initiate ApplyMask node here\n",
+ "mask_func = MapNode(ApplyMask(output_type='NIFTI'),\n",
+ "                    name=\"mask_func\",\n",
+ "                    iterfield=[\"in_file\"])\n",
+ "\n",
+ "# Connect the smoothed SUSAN output to the ApplyMask node here\n",
+ "preproc.connect([(susan, mask_func, [('outputnode.smoothed_files', 'in_file')]),\n",
+ "                 (mask_GM, mask_func, [('out_file', 'mask_file')])\n",
+ "                 ])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Remove linear trends in functional images\n",
+ "\n",
+ "Last but not least, let's use Nipype's `TSNR` module to remove linear and quadratic trends in the smoothed functional images. For this, you only have to specify the `regress_poly` parameter when initiating the node."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.algorithms.confounds import TSNR\n",
+ "\n",
+ "# Initiate TSNR node here (regress_poly=2 removes linear and quadratic trends)\n",
+ "detrend = Node(TSNR(regress_poly=2), name=\"detrend\")\n",
+ "\n",
+ "# Connect the detrend node to the other nodes here\n",
+ "preproc.connect([(mask_func, detrend, [('out_file', 'in_file')])])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Data input with `SelectFiles` and `iterables`\n",
+ "\n",
+ "This is all well and good, but so far we have had to specify the input files for `gunzip_anat` and `gunzip_func` ourselves. How can we scale this up to multiple subjects and/or multiple functional images, and make the workflow take its input directly from the BIDS dataset?\n",
+ "\n",
+ "For this, we need [`SelectFiles`](../../../nipype_tutorial/notebooks/basic_data_input.ipynb#SelectFiles) and [`iterables`](../../../nipype_tutorial/notebooks/basic_iteration.ipynb)! It's rather simple: specify a template and fill in the placeholder variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import SelectFiles\n",
+ "from nipype import SelectFiles\n",
+ "\n",
+ "# String template with {}-based placeholders\n",
+ "templates = {'anat': 'sub-{subject_id}/ses-{ses_id}/anat/'\n",
+ "                     'sub-{subject_id}_ses-{ses_id}_T1w.nii.gz',\n",
+ "             'func': 'sub-{subject_id}/ses-{ses_id}/func/'\n",
+ "                     'sub-{subject_id}_ses-{ses_id}_task-{task_id}_bold.nii.gz'}\n",
+ "\n",
+ "# Create SelectFiles node\n",
+ "sf = Node(SelectFiles(templates,\n",
+ "                      base_directory='/data/ds000114',\n",
+ "                      sort_filelist=True),\n",
+ "          name='selectfiles')\n",
+ "sf.inputs.ses_id = 'test'\n",
+ "sf.inputs.task_id = 'fingerfootlips'\n",
+ "\n",
+ "# Now we can specify over which subjects the workflow should iterate.\n",
+ "# To test the workflow, let's still just look at subject 7.\n",
+ "subject_list = ['07']\n",
+ "sf.iterables = [('subject_id', subject_list)]\n",
+ "\n",
+ "# Connect the SelectFiles node to the other nodes here\n",
+ "preproc.connect([(sf, gunzip_anat, [('anat', 'in_file')]),\n",
+ "                 (sf, gunzip_func, [('func', 'in_file')])])"
+ ]
+ },
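+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before moving on, it can be reassuring to check that the templates actually resolve to existing files. Below is a minimal, optional sketch: it uses a throw-away node (here called `test_sf`, a name made up just for this check) that is not part of the pipeline."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Throw-away node, only used to verify that the templates resolve\n",
+ "test_sf = Node(SelectFiles(templates,\n",
+ "                           base_directory='/data/ds000114',\n",
+ "                           sort_filelist=True),\n",
+ "               name='test_sf')\n",
+ "test_sf.inputs.ses_id = 'test'\n",
+ "test_sf.inputs.task_id = 'fingerfootlips'\n",
+ "test_sf.inputs.subject_id = '07'\n",
+ "\n",
+ "# Running the node prints the resolved 'anat' and 'func' paths\n",
+ "print(test_sf.run().outputs)"
+ ]
+ },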
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Visualize the workflow\n",
+ "\n",
+ "Now that we're done, let's look at the workflow that we just created."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create preproc output graph\n",
+ "preproc.write_graph(graph2use='colored', format='png', simple_form=True)\n",
+ "\n",
+ "# Visualize the graph\n",
+ "from IPython.display import Image\n",
+ "Image(filename='/output/work_preproc/graph.png', width=750)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Run the Workflow\n",
+ "\n",
+ "Now we are ready to run the workflow! Be careful about the `n_procs` parameter if you run a workflow in `'MultiProc'` mode. `n_procs` specifies the number of jobs/cores your computer will use to run the workflow. If this number is too high, your computer will try to execute too many things at once and will most likely crash.\n",
+ "\n",
+ "**Note**: If you're using a Docker container and FLIRT fails to run for no apparent reason, you might need to change the memory settings in the Docker preferences (6 GB should be enough for this workflow)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "preproc.run('MultiProc', plugin_args={'n_procs': 2})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Inspect output\n",
+ "\n",
+ "What did we actually do? Let's look at all the data that was created."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!tree /output/work_preproc/ -I '*js|*json|*pklz|_report|*dot|*html|*txt|*.m'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "But what did we do specifically? Well, let's investigate."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Motion Correction and Artifact Detection\n",
+ "\n",
+ "How much did the subject move in the scanner, and were there any outliers in the functional images?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%matplotlib inline\n",
+ "# Plot the motion parameters\n",
+ "import numpy as np\n",
+ "import matplotlib.pyplot as plt\n",
+ "par = np.loadtxt('/output/work_preproc/_subject_id_07/mcflirt/'\n",
+ "                 'asub-07_ses-test_task-fingerfootlips_bold_roi_mcf.nii.gz.par')\n",
+ "fig, axes = plt.subplots(2, 1, figsize=(15, 5))\n",
+ "axes[0].set_ylabel('rotation (radians)')\n",
+ "axes[0].plot(par[0:, :3])\n",
+ "axes[1].plot(par[0:, 3:])\n",
+ "axes[1].set_xlabel('time (TR)')\n",
+ "axes[1].set_ylabel('translation (mm)');"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The motion parameters seem to look OK. What about the detection of artifacts?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Show the artifact detection output\n",
+ "from IPython.display import SVG\n",
+ "SVG(filename='/output/work_preproc/_subject_id_07/art/'\n",
+ "    'plot.asub-07_ses-test_task-fingerfootlips_bold_roi_mcf.svg')"
+ ]
+ },
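+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you want the flagged volumes programmatically (e.g. to pass them to a model later), you can also read the outlier text file that `ArtifactDetect` writes next to this plot. A minimal sketch, assuming the file name ends in `_outliers.txt` as in the default rapidart naming; we locate it with a glob rather than hard-coding the name:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from glob import glob\n",
+ "import numpy as np\n",
+ "\n",
+ "# Locate the outlier file in the art working directory (pattern is an assumption)\n",
+ "outlier_file = glob('/output/work_preproc/_subject_id_07/art/*_outliers.txt')[0]\n",
+ "\n",
+ "# One volume index per line; ndmin=1 keeps it an array even for a single outlier\n",
+ "print('Flagged volumes:', np.loadtxt(outlier_file, ndmin=1))"
+ ]
+ },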
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Masks and Probability maps\n",
+ "\n",
+ "Let's see what all the masks and probability maps look like. For this we will use `nilearn`'s `plot_stat_map` function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nilearn import image as nli\n",
+ "from nilearn.plotting import plot_stat_map\n",
+ "%matplotlib inline\n",
+ "output = '/output/work_preproc/_subject_id_07/'"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First, let's look at the tissue probability maps."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "anat = output + 'gunzip_anat/sub-07_ses-test_T1w.nii'\n",
+ "plot_stat_map(\n",
+ "    output + 'segment/c1sub-07_ses-test_T1w.nii', title='GM prob. map', cmap=plt.cm.magma,\n",
+ "    threshold=0.5, bg_img=anat, display_mode='z', cut_coords=range(-35, 15, 10), dim=-1);\n",
+ "plot_stat_map(\n",
+ "    output + 'segment/c2sub-07_ses-test_T1w.nii', title='WM prob. map', cmap=plt.cm.magma,\n",
+ "    threshold=0.5, bg_img=anat, display_mode='z', cut_coords=range(-35, 15, 10), dim=-1);\n",
+ "plot_stat_map(\n",
+ "    output + 'segment/c3sub-07_ses-test_T1w.nii', title='CSF prob. map', cmap=plt.cm.magma,\n",
+ "    threshold=0.5, bg_img=anat, display_mode='z', cut_coords=range(-35, 15, 10), dim=-1);\n",
+ "plot_stat_map(\n",
+ "    output + 'mask_GM/c1sub-07_ses-test_T1w_flirt_thresh.nii', title='dilated GM Mask', cmap=plt.cm.magma,\n",
+ "    threshold=0.5, bg_img=anat, display_mode='z', cut_coords=range(-35, 15, 10), dim=-1);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Functional Image transformations\n",
+ "\n",
+ "Let's also investigate the transformations that we applied to the functional images."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nilearn import image as nli\n",
+ "from nilearn.plotting import plot_epi\n",
+ "%matplotlib inline\n",
+ "output = '/output/work_preproc/_subject_id_07/'\n",
+ "\n",
+ "plot_epi(output + 'mcflirt/asub-07_ses-test_task-fingerfootlips_bold_roi_mcf.nii.gz_mean_reg.nii.gz',\n",
+ "         title='Motion corrected mean image', display_mode='z', cut_coords=range(-40, 21, 15),\n",
+ "         cmap=plt.cm.viridis);\n",
+ "\n",
+ "mean = nli.mean_img(output + 'applywarp/asub-07_ses-test_task-fingerfootlips_bold_roi_mcf_flirt.nii')\n",
+ "plot_epi(mean, title='Coregistered mean image', display_mode='z', cut_coords=range(-40, 21, 15),\n",
+ "         cmap=plt.cm.viridis);\n",
+ "\n",
+ "mean = nli.mean_img('/output/work_preproc/susan/_subject_id_07/smooth/mapflow/_smooth0/'\n",
+ "                    'asub-07_ses-test_task-fingerfootlips_bold_roi_mcf_flirt_smooth.nii.gz')\n",
+ "plot_epi(mean, title='Smoothed mean image', display_mode='z', cut_coords=range(-40, 21, 15),\n",
+ "         cmap=plt.cm.viridis);\n",
+ "\n",
+ "mean = nli.mean_img(output + 'mask_func/mapflow/_mask_func0/'\n",
+ "                    'asub-07_ses-test_task-fingerfootlips_bold_roi_mcf_flirt_smooth_masked.nii')\n",
+ "plot_epi(mean, title='Masked mean image', display_mode='z', cut_coords=range(-40, 21, 15),\n",
+ "         cmap=plt.cm.viridis);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "That's all nice and beautiful, but what did smoothing and detrending actually do to the data?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nibabel as nb\n",
+ "%matplotlib inline\n",
+ "output = '/output/work_preproc/_subject_id_07/'\n",
+ "\n",
+ "# Load the relevant datasets\n",
+ "mc = nb.load(output + 'applywarp/asub-07_ses-test_task-fingerfootlips_bold_roi_mcf_flirt.nii')\n",
+ "smooth = nb.load('/output/work_preproc/susan/_subject_id_07/smooth/mapflow/'\n",
+ "                 '_smooth0/asub-07_ses-test_task-fingerfootlips_bold_roi_mcf_flirt_smooth.nii.gz')\n",
+ "detrended_data = nb.load(output + 'detrend/detrend.nii.gz')\n",
+ "\n",
+ "# Plot a representative voxel\n",
+ "x, y, z = 32, 36, 46\n",
+ "fig = plt.figure(figsize=(12, 4))\n",
+ "plt.plot(mc.get_fdata()[x, y, z, :])\n",
+ "plt.plot(smooth.get_fdata()[x, y, z, :])\n",
+ "plt.plot(detrended_data.get_fdata()[x, y, z, :])\n",
+ "plt.legend(['motion corrected', 'smoothed', 'detrended']);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Data output with `DataSink`\n",
+ "\n",
+ "The results look fine, but we don't need all those temporary files. So let's use `DataSink` to keep only those files that we actually need for the 1st-level analysis."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The next step is to specify all the outputs that we want to keep in our output folder `output`. Make sure to keep:\n",
+ "- from the artifact detection node: the outlier file as well as the outlier plot\n",
+ "- from the motion correction node: the motion parameters\n",
+ "- from the last node: the detrended functional image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nipype.interfaces.io import DataSink\n",
+ "\n",
+ "# Initiate the datasink node\n",
+ "output_folder = 'datasink_handson'\n",
+ "datasink = Node(DataSink(base_directory='/output/',\n",
+ "                         container=output_folder),\n",
+ "                name=\"datasink\")\n",
+ "\n",
+ "# Connect nodes to datasink here\n",
+ "preproc.connect([(art, datasink, [('outlier_files', 'preproc.@outlier_files'),\n",
+ "                                  ('plot_files', 'preproc.@plot_files')]),\n",
+ "                 (mcflirt, datasink, [('par_file', 'preproc.@par')]),\n",
+ "                 (detrend, datasink, [('detrended_file', 'preproc.@func')]),\n",
+ "                 ])\n",
+ "\n",
+ "# Use the following substitutions for the DataSink output\n",
+ "substitutions = [('asub', 'sub'),\n",
+ "                 ('_ses-test_task-fingerfootlips_bold_roi_mcf', ''),\n",
+ "                 ('.nii.gz.par', '.par'),\n",
+ "                 ]\n",
+ "\n",
+ "# Get rid of the folder '_subject_id_07' and rename the detrend output\n",
+ "substitutions += [('_subject_id_%s/detrend' % s,\n",
+ "                   '_subject_id_%s/sub-%s_detrend' % (s, s)) for s in subject_list]\n",
+ "substitutions += [('_subject_id_%s/' % s, '') for s in subject_list]\n",
+ "datasink.inputs.substitutions = substitutions\n",
+ "\n",
+ "# Run the preprocessing workflow again, this time with substitutions\n",
+ "preproc.run('MultiProc', plugin_args={'n_procs': 4})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Now we're ready for the next section!"
+ ]
+ }
+ ],
+ "metadata": {
+ "anaconda-cloud": {},
+ "kernelspec": {
+ "display_name": "Python [default]",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}