Changes from all commits
163 commits
93c9a36
Use MRG_RandomStreams instead for shared_randomstreams for GPU compat.
abergeron Oct 3, 2016
4f251cd
add testsuites names
slefrancois Oct 6, 2016
500c214
Merge pull request #173 from slefrancois/testsuite_names
nouiz Oct 6, 2016
82c85e2
Add JUnit writer for speed tests, remove hardcoded reference times
slefrancois Oct 6, 2016
9918b7a
remove testsuite prefix option
slefrancois Oct 7, 2016
99c7aef
Merge pull request #175 from slefrancois/testsuite_names
nouiz Oct 7, 2016
f14107d
single performance file open, correct only access times variables if …
slefrancois Oct 7, 2016
f724c2c
move assert gpu_times not nan
slefrancois Oct 7, 2016
6b719ba
Merge pull request #174 from slefrancois/speed_report
nouiz Oct 11, 2016
85f56c2
add explicit CUDA path to buildbot
slefrancois Oct 13, 2016
5911cd1
Merge pull request #177 from slefrancois/test_jenkins
nouiz Oct 13, 2016
d403591
Compute mean in higher precision to avoid overflow.
abergeron Oct 20, 2016
5a13d98
Fix import of sandbox.
abergeron Oct 20, 2016
93837e0
Fix printout in lstm.py.
abergeron Oct 24, 2016
780cecc
Adjust mean dtypes for scores in SdA too.
abergeron Oct 26, 2016
f1c0587
Merge pull request #172 from abergeron/mixed
lamblin Oct 31, 2016
cd462ec
split performance report file
slefrancois Nov 3, 2016
a74f528
Merge pull request #178 from slefrancois/test_perf_report
nouiz Nov 4, 2016
fd5cb65
Do the speed test on the new gpu back-end.
nouiz Jan 13, 2017
fe3dc30
Merge pull request #180 from nouiz/master2
nouiz Jan 14, 2017
e481d33
install libgpuarray for dlt speed tests
slefrancois Jan 16, 2017
9c8d609
Merge pull request #181 from slefrancois/add_gpuarray
nouiz Jan 16, 2017
73e621d
move speedtest cache outside workspace
Apr 18, 2017
8d906ad
Merge pull request #184 from lisa-lab/speedtest_move_cache
nouiz Apr 18, 2017
c52fa00
add label to speedtest class
Apr 19, 2017
e302237
Merge pull request #185 from slefrancois/speed_label
nouiz Apr 21, 2017
e7b2dc8
buildbot includes theano.gpuarray
Apr 28, 2017
6ef907b
Merge pull request #187 from slefrancois/buildbot_gpuarray
nouiz May 5, 2017
61adbf8
fix typos/spelling
kirkins Jun 6, 2017
8819681
remove extra space
kirkins Jun 6, 2017
59667bd
remove extra space
kirkins Jun 6, 2017
3704876
remove more spaces
kirkins Jun 6, 2017
f78ba92
extra space in logreg
Jun 6, 2017
1867a4e
remove spaces in mlp page
Jun 6, 2017
d829400
remove spaces in dA page
Jun 6, 2017
738b641
remove space in rbm page
Jun 6, 2017
ec4855a
spaces on DBN page
Jun 6, 2017
85962ee
spaces on lstm page
Jun 6, 2017
7cade8c
Merge pull request #188 from kirkins/patch-1
nouiz Jun 7, 2017
bb2aa41
typo and space fix
Jun 7, 2017
8eb21da
typo on lenet page
Jun 7, 2017
147cb2e
typo/inconsistency in spelling of corruption
Jun 7, 2017
f7f7933
Merge pull request #190 from kirkins/proof_reading
nouiz Jun 19, 2017
534e915
Add small note on easy download script
kirkins Jun 20, 2017
cb4261c
Tell that it work on Mac.
nouiz Jun 20, 2017
c745a08
Merge pull request #191 from kirkins/patch-1
nouiz Jun 26, 2017
36ec511
add link to github
Jul 11, 2017
81f2575
typo
Jul 11, 2017
1cd8f36
Merge pull request #192 from slefrancois/link_github
lamblin Jul 11, 2017
ebb8c21
update nosetests command
Jul 26, 2017
8d25f1a
use nosetests directly for gpu
Jul 26, 2017
764cd4c
libgpuarray full checkout
Sep 7, 2017
ea2c59f
Merge pull request #194 from slefrancois/gpuarray_depth
nouiz Sep 8, 2017
544c48c
MKL settings
Oct 30, 2017
2030c5a
Merge pull request #196 from slefrancois/mkl
nouiz Oct 30, 2017
057fd57
Fix travis with newer MKL and Theano.
nouiz Oct 30, 2017
12a557a
Merge pull request #197 from nouiz/master2
nouiz Oct 30, 2017
62e4c21
set OMP_NUM_THREADS
Oct 30, 2017
8bd8a5a
Merge pull request #198 from slefrancois/omp_numthread
nouiz Oct 30, 2017
212a8cb
first commit
StephanieLarocque May 1, 2017
46f6a35
images for fcn and unet
StephanieLarocque May 1, 2017
3cc70cd
small changes
StephanieLarocque May 1, 2017
6fed95d
fixed details
adri-romsor May 1, 2017
ae93f3a
polyps dataset explanation + metrics
StephanieLarocque May 1, 2017
ae295bd
jaccard visualisation
StephanieLarocque May 1, 2017
1433d2e
code for fcn8, miss dataset_loaders
StephanieLarocque May 1, 2017
de2d672
small changes
StephanieLarocque May 1, 2017
5a2bf99
small changes
StephanieLarocque May 1, 2017
04a030d
old build instructions
StephanieLarocque May 1, 2017
f17ad4a
fixed fcn8
adri-romsor May 1, 2017
c987621
cortical
StephanieLarocque May 1, 2017
43207d7
ray
StephanieLarocque May 1, 2017
1766b4e
remove file
StephanieLarocque May 1, 2017
022d3c0
remove dependance from metrics.py
StephanieLarocque May 1, 2017
a430525
remove dependance from model_helpers.py
StephanieLarocque May 1, 2017
350cff2
big brain images
StephanieLarocque May 1, 2017
e6e82c7
added reference captions
StephanieLarocque May 1, 2017
fe0ea46
global dataset loader from fvsin
StephanieLarocque May 1, 2017
604025b
first commit for cortical layers segmentation
StephanieLarocque May 1, 2017
fa32fcb
first commit
StephanieLarocque May 1, 2017
f76c5f8
cortical layers
May 2, 2017
f986af8
cortical layers updated
StephanieLarocque May 2, 2017
1e29a68
cortical layers imagse
StephanieLarocque May 2, 2017
694c612
Feedback comments from PascalLamblin
StephanieLarocque May 2, 2017
eeedbd9
fixing small details on the metrics
adri-romsor May 2, 2017
138ac61
dataset explanation addeed
StephanieLarocque May 2, 2017
0d2e40f
small changes
StephanieLarocque May 2, 2017
1e86c47
reviewed 1d segm
adri-romsor May 2, 2017
f7267b9
Delete fcn_1D_segm.txt
StephanieLarocque May 2, 2017
37165c9
fix for FCN32-16-8 description
StephanieLarocque May 2, 2017
4fa0e2c
FCN description fixed
StephanieLarocque May 2, 2017
548d011
typo
StephanieLarocque May 2, 2017
0c02bde
fix figure labelling
StephanieLarocque May 2, 2017
0cb25a5
explanation ground truth vs predicted segmentation
StephanieLarocque May 2, 2017
4aef44e
figure 4 description
StephanieLarocque May 2, 2017
1ec060c
deleted files
StephanieLarocque May 2, 2017
635c16c
first commit for fcn1D segmentations
StephanieLarocque May 2, 2017
deec646
edited text, including 2D-> 3D changes
May 3, 2017
dc9f370
lasagne recipe unet implementation
May 3, 2017
18a5dd1
unet update
May 3, 2017
c8d3ed4
added code
May 3, 2017
62bf8b0
changes in the prerequisite
May 3, 2017
22258be
small corrections
adri-romsor May 3, 2017
43ed78e
references fixed
May 3, 2017
e6055ce
acknowledgements
May 3, 2017
4f37df3
fix website links
May 3, 2017
09bbbd2
polyps results image
May 3, 2017
683a233
no change
May 3, 2017
e364e6f
update from pascal comment
May 3, 2017
b218cfb
small changes
May 4, 2017
d515c79
delete unet.py (previous version)
StephanieLarocque May 4, 2017
fc5858d
small change
StephanieLarocque May 4, 2017
c409e17
cleaned dataset loader
StephanieLarocque May 4, 2017
a38467b
relative paths
StephanieLarocque May 4, 2017
88f5441
cleaned dataset loader
StephanieLarocque May 4, 2017
873e9d4
deleted file
StephanieLarocque May 4, 2017
3ee4686
relative paths
StephanieLarocque May 4, 2017
2f92d33
with BN
StephanieLarocque May 4, 2017
762da4b
small changes
StephanieLarocque May 4, 2017
2026c59
Rename code/Unet_lasagne_recipes.py to code/unet/Unet_lasagne_recipes.py
StephanieLarocque May 4, 2017
8285a9f
first commit
StephanieLarocque May 4, 2017
d63ae5f
dataset loader for unet and fcn8
StephanieLarocque May 4, 2017
ce89838
fixed path for unet.py
StephanieLarocque May 4, 2017
cac2d01
adde training loop link
StephanieLarocque May 4, 2017
c07ef34
fix dataset loader for em, name for polyps
StephanieLarocque May 4, 2017
a6e6bf1
fix import
StephanieLarocque May 4, 2017
0558598
small change
StephanieLarocque May 4, 2017
70495d8
small change
StephanieLarocque May 4, 2017
3f8dd64
fix input dim and details
StephanieLarocque May 4, 2017
e3a4ebf
import conv2DDDNlayer
StephanieLarocque May 4, 2017
44ff025
saving stuff
StephanieLarocque May 4, 2017
bbdf0e5
small changes
StephanieLarocque May 4, 2017
c1a2b19
accuracy import fix
StephanieLarocque May 4, 2017
1adcc24
small change
StephanieLarocque May 4, 2017
19068d4
load data from train file
StephanieLarocque May 4, 2017
f1972a4
data link
StephanieLarocque May 4, 2017
db56146
removed files .pyc
May 5, 2017
d731882
index updated with new links
May 5, 2017
ca52c8a
ref for dataset loaders
May 5, 2017
119d10e
remove loading pretrained weights
May 5, 2017
5b05fab
update in loading data parameters
May 5, 2017
6f867c6
added requirements for lasagne, datasetloaders and simple ITK
May 6, 2017
70c213a
specify that data augmentation is used
May 6, 2017
15efc40
data augmentation fixed
May 6, 2017
739870e
fix in shared_path for config.ini
May 6, 2017
ac54712
More small fixes
lamblin May 9, 2017
ef57f6b
Fix warnings in doc generation
lamblin May 9, 2017
5a62c66
Remove files that are not used any more
lamblin May 10, 2017
2a9af6f
Formatting
lamblin May 10, 2017
e0d53ba
Changes to help unet run
lamblin May 23, 2017
28fa556
added per class jaccard
adri-romsor May 23, 2017
86d6e16
Results for unet
lamblin May 24, 2017
40819f9
Preload data for 1D cortical dataset
lamblin May 25, 2017
72881fb
Update segmentation tutorials and .gitignore.
notoraptor Dec 14, 2017
e82dadc
Update rnnrbm.py
LeoMingo Jan 19, 2018
238a0bc
Merge pull request #201 from LeoMingo/patch-3
lamblin Jan 26, 2018
562a389
add conlleval.pl
slefrancois Mar 8, 2018
0316283
update conlleval version
slefrancois Mar 9, 2018
2829c28
update url
slefrancois Mar 9, 2018
b13a1b7
update credits
slefrancois Mar 9, 2018
7f06bc2
Merge pull request #202 from slefrancois/conlleval
nouiz Mar 9, 2018
479f645
Add final scores and training times for FCN 2D segmentation
lamblin Jun 15, 2018
11c4651
Merge pull request #204 from lisa-lab/segmentation
lamblin Jun 15, 2018
3 changes: 3 additions & 0 deletions .gitignore
@@ -1,3 +1,4 @@
.idea
code/*.pyc
code/*_plots
code/tmp*
@@ -13,3 +14,5 @@ html
*.pyc
*~
*.swp
# This directory may be created by scripts from segmentation tutorials.
save_models
68 changes: 60 additions & 8 deletions .jenkins/jenkins_buildbot_dlt.sh
@@ -1,18 +1,62 @@
#!/bin/bash

# CUDA
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
export LIBRARY_PATH=/usr/local/cuda/lib64:$LIBRARY_PATH

# MKL
export MKL_THREADING_LAYER=GNU

# Set OpenMP threads for stability of speedtests
export OMP_NUM_THREADS=1

BUILDBOT_DIR=$WORKSPACE/nightly_build
source $HOME/.bashrc

mkdir -p ${BUILDBOT_DIR}

date
COMPILEDIR=$WORKSPACE/compile/lisa_theano_compile_dir_deeplearning
COMPILEDIR=$HOME/.theano/lisa_theano_buildbot_deeplearning
NOSETESTS=${BUILDBOT_DIR}/Theano/bin/theano-nose
XUNIT="--with-xunit --xunit-file="
# name test suites
SUITE="--xunit-testsuite-name="

FLAGS=warn.ignore_bug_before=0.5,compiledir=${COMPILEDIR}
export PYTHONPATH=${BUILDBOT_DIR}/Theano:${BUILDBOT_DIR}/Pylearn:$PYTHONPATH

# Install libgpuarray and pygpu
cd ${BUILDBOT_DIR}

# Make fresh clone (with no history since we don't need it)
rm -rf libgpuarray
git clone "https://github.com/Theano/libgpuarray.git"

(cd libgpuarray && echo "libgpuarray commit" && git rev-parse HEAD)

# Clean up previous installs (to make sure no old files are left)
rm -rf local
mkdir local

# Build libgpuarray and run C tests
mkdir libgpuarray/build
(cd libgpuarray/build && cmake .. -DCMAKE_BUILD_TYPE=${GPUARRAY_CONFIG} -DCMAKE_INSTALL_PREFIX=${BUILDBOT_DIR}/local && make)

# Finally install
(cd libgpuarray/build && make install)
export LD_LIBRARY_PATH=${BUILDBOT_DIR}/local/lib:${LD_LIBRARY_PATH}
export LIBRARY_PATH=${BUILDBOT_DIR}/local/lib:${LIBRARY_PATH}
export CPATH=${BUILDBOT_DIR}/local/include:${CPATH}

# Build the pygpu modules
(cd libgpuarray && python setup.py build_ext --inplace -I${BUILDBOT_DIR}/local/include -L${BUILDBOT_DIR}/local/lib)

mkdir ${BUILDBOT_DIR}/local/lib/python
export PYTHONPATH=${PYTHONPATH}:${BUILDBOT_DIR}/local/lib/python
# Then install
(cd libgpuarray && python setup.py install --home=${BUILDBOT_DIR}/local)

# Install Theano
cd ${BUILDBOT_DIR}
if [ ! -d ${BUILDBOT_DIR}/Theano ]; then
git clone git://github.com/Theano/Theano.git
@@ -28,9 +72,17 @@ echo "git version for Theano:" `git rev-parse HEAD`
cd ${WORKSPACE}/code
echo "git version:" `git rev-parse HEAD`

echo "executing nosetests speed with mode=FAST_RUN"
FILE=${BUILDBOT_DIR}/dlt_tests.xml
THEANO_FLAGS=${FLAGS},mode=FAST_RUN ${NOSETESTS} ${XUNIT}${FILE} test.py:speed
echo "executing nosetests with mode=FAST_RUN,floatX=float32"
FILE=${BUILDBOT_DIR}/dlt_float32_tests.xml
THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32 ${NOSETESTS} ${XUNIT}${FILE}
echo "==== Executing nosetests speed with mode=FAST_RUN"
NAME=dlt_speed
FILE=${BUILDBOT_DIR}/${NAME}_tests.xml
THEANO_FLAGS=${FLAGS},mode=FAST_RUN ${NOSETESTS} ${XUNIT}${FILE} ${SUITE}${NAME} test.py:speed

echo "==== Executing nosetests with mode=FAST_RUN,floatX=float32"
NAME=dlt_float32
FILE=${BUILDBOT_DIR}/${NAME}_tests.xml
THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32 ${NOSETESTS} ${XUNIT}${FILE} ${SUITE}${NAME}

echo "==== Executing nosetests with mode=FAST_RUN,floatX=float32,device=cuda"
NAME=dlt_float32_cuda
FILE=${BUILDBOT_DIR}/${NAME}_tests.xml
PYTHONPATH=${BUILDBOT_DIR}/Theano:${BUILDBOT_DIR}/DeepLearningTutorials/code:${PYTHONPATH} THEANO_FLAGS=${FLAGS},mode=FAST_RUN,floatX=float32,device=cuda nosetests test.py ${XUNIT}${FILE} ${SUITE}${NAME}
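The script above builds libgpuarray and pygpu from a fresh clone, installs them under ${BUILDBOT_DIR}/local, and then runs the float32 suite a second time with device=cuda on the new gpuarray back-end. A minimal Python sketch, under the assumption that Theano and pygpu are installed as in the script, for checking that the back-end is actually active before launching the GPU suite (this check is not part of the buildbot script itself):

# Sketch: confirm the gpuarray back-end is usable under the current THEANO_FLAGS.
import theano
from theano import gpuarray

print(theano.config.device)        # 'cuda' when THEANO_FLAGS includes device=cuda
print(gpuarray.pygpu_activated)    # True only if libgpuarray/pygpu were found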
1 change: 1 addition & 0 deletions .travis.yml
@@ -78,6 +78,7 @@ script:
- pwd
- ls
- export THEANO_FLAGS=warn.ignore_bug_before=all,on_opt_error=raise,on_shape_error=raise
- export MKL_THREADING_LAYER=GNU
- python --version
- nosetests -v $PART

2 changes: 1 addition & 1 deletion README.rst
@@ -37,4 +37,4 @@ Subdirectories:
Build instructions
------------------

To build the html version of the tutorials, install sphinx and run doc/Makefile
To build the html version of the tutorials, run python doc/scripts/docgen.py
6 changes: 3 additions & 3 deletions code/DBN.py
@@ -340,7 +340,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
c.append(pretraining_fns[i](index=batch_index,
lr=pretrain_lr))
print('Pre-training layer %i, epoch %d, cost ' % (i, epoch), end=' ')
print(numpy.mean(c))
print(numpy.mean(c, dtype='float64'))

end_time = timeit.default_timer()
# end-snippet-2
@@ -391,7 +391,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,
if (iter + 1) % validation_frequency == 0:

validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
this_validation_loss = numpy.mean(validation_losses, dtype='float64')
print('epoch %i, minibatch %i/%i, validation error %f %%' % (
epoch,
minibatch_index + 1,
@@ -414,7 +414,7 @@ def test_DBN(finetune_lr=0.1, pretraining_epochs=100,

# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
test_score = numpy.mean(test_losses, dtype='float64')
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
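The DBN.py changes above follow the "Compute mean in higher precision to avoid overflow" commit: with floatX=float32 the per-minibatch costs and losses are float32 values, so each reduction is now told to accumulate in float64. A small numpy sketch of what the added dtype argument does (the array below is illustrative only, not taken from the tutorial):

# Sketch: numpy.mean's dtype argument selects the type used to carry out the
# reduction; float64 avoids precision loss when averaging many float32 losses.
import numpy

losses = numpy.random.rand(10**6).astype('float32')
print(numpy.mean(losses))                    # reduction carried out in float32
print(numpy.mean(losses, dtype='float64'))   # reduction carried out in float64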
8 changes: 4 additions & 4 deletions code/SdA.py
@@ -40,7 +40,7 @@

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
@@ -394,7 +394,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
c.append(pretraining_fns[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr))
print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))
print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c, dtype='float64')))

end_time = timeit.default_timer()

@@ -442,7 +442,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
this_validation_loss = numpy.mean(validation_losses, dtype='float64')
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
@@ -463,7 +463,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
test_score = numpy.mean(test_losses, dtype='float64')
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
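SdA.py now draws its random numbers from MRG_RandomStreams, which exposes the same sampling calls as theano.tensor.shared_randomstreams.RandomStreams but also runs on the GPU back-ends. A minimal sketch of the swapped-in class, assuming only that Theano is installed; the corruption probability and the compiled function are illustrative, not taken from the tutorial code:

# Sketch: a binomial corruption mask of the kind the denoising layers use,
# built with the GPU-compatible MRG random streams.
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

srng = RandomStreams(seed=123)
x = T.matrix('x')
mask = srng.binomial(size=x.shape, n=1, p=0.9, dtype=theano.config.floatX)
corrupt = theano.function([x], x * mask)   # zeroes roughly 10% of the inputs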
Empty file.
185 changes: 185 additions & 0 deletions code/cnn_1D_segm/data_loader/cortical_layers.py
@@ -0,0 +1,185 @@
import os
import time

import numpy as np
from PIL import Image
import re
import warnings

from dataset_loaders.parallel_loader import ThreadedDataset
from parallel_loader_1D import ThreadedDataset_1D

floatX = 'float32'

class Cortical6LayersDataset(ThreadedDataset_1D):
'''The Cortical Layers Dataset.
Parameters
----------
which_set: string
A string in ['train', 'val', 'valid', 'test'], corresponding to
the set to be returned.
split: float
A float indicating the dataset split between training and validation.
For example, if split=0.85, 85\% of the images will be used for training,
whereas 15\% will be used for validation.
'''
name = 'cortical_layers'

non_void_nclasses = 7
GTclasses = [0, 1, 2, 3, 4, 5, 6]
_cmap = {
0: (128, 128, 128), # padding
1: (128, 0, 0), # layer 1
2: (128, 64, ), # layer 2
3: (128, 64, 128), # layer 3
4: (0, 0, 128), # layer 4
5: (0, 0, 64), # layer 5
6: (64, 64, 128), # layer 6
}
_mask_labels = {0: 'padding', 1: 'layers1', 2: 'layer2', 3: 'layer3',
4: 'layer4', 5: 'layer5', 6: 'layer6'}
_void_labels = []


_filenames = None

@property
def filenames(self):

if self._filenames is None:
# Load filenames
nfiles = sum(1 for line in open(self.mask_path))
filenames = range(nfiles)
np.random.seed(1609)
np.random.shuffle(filenames)

if self.which_set == 'train':
filenames = filenames[:int(nfiles*self.split)]
elif self.which_set == 'val':
filenames = filenames[-(nfiles - int(nfiles*self.split)):]

# Save the filenames list
self._filenames = filenames

return self._filenames

def __init__(self,
which_set="train",
split=0.85,
shuffle_at_each_epoch = True,
smooth_or_raw = 'both',
*args, **kwargs):

self.task = 'segmentation'

self.n_layers = 6
n_layers_path = str(self.n_layers)+"layers_segmentation"

self.which_set = "val" if which_set == "valid" else which_set
if self.which_set not in ("train", "val", 'test'):
raise ValueError("Unknown argument to which_set %s" %
self.which_set)

self.split = split

self.image_path_raw = os.path.join(self.path,n_layers_path,"training_raw.txt")
self.image_path_smooth = os.path.join(self.path,n_layers_path, "training_geo.txt")
self.mask_path = os.path.join(self.path,n_layers_path, "training_cls.txt")
self.regions_path = os.path.join(self.path, n_layers_path, "training_regions.txt")

self.smooth_raw_both = smooth_or_raw

if smooth_or_raw == 'both':
self.data_shape = (200,2)
else :
self.data_shape = (200,1)

super(Cortical6LayersDataset, self).__init__(*args, **kwargs)

def get_names(self):
"""Return a dict of names, per prefix/subset."""

return {'default': self.filenames}



def test_6layers():
train_iter = Cortical6LayersDataset(
which_set='train',
smooth_or_raw = 'both',
batch_size=500,
data_augm_kwargs={},
return_one_hot=False,
return_01c=False,
return_list=True,
use_threads=False)

valid_iter = Cortical6LayersDataset(
which_set='valid',
smooth_or_raw = 'smooth',
batch_size=500,
data_augm_kwargs={},
return_one_hot=False,
return_01c=False,
return_list=True,
use_threads=False)

valid_iter2 = Cortical6LayersDataset(
which_set='valid',
smooth_or_raw = 'raw',
batch_size=500,
data_augm_kwargs={},
return_one_hot=False,
return_01c=False,
return_list=True,
use_threads=False)



train_nsamples = train_iter.nsamples
train_nbatches = train_iter.nbatches
valid_nbatches = valid_iter.nbatches
valid_nbatches2 = valid_iter2.nbatches



# Simulate training
max_epochs = 1
print "Simulate training for", str(max_epochs), "epochs"
start_training = time.time()
for epoch in range(max_epochs):
print "Epoch #", str(epoch)

start_epoch = time.time()

print "Iterate on the training set", train_nbatches, "minibatches"
for mb in range(train_nbatches):
start_batch = time.time()
batch = train_iter.next()
if mb%5 ==0:
print("Minibatch train {}: {} sec".format(mb, (time.time() -
start_batch)))

print "Iterate on the validation set", valid_nbatches, "minibatches"
for mb in range(valid_nbatches):
start_batch = time.time()
batch = valid_iter.next()
if mb%5 ==0:
print("Minibatch valid {}: {} sec".format(mb, (time.time() -
start_batch)))

print "Iterate on the validation set (second time)", valid_nbatches2, "minibatches"
for mb in range(valid_nbatches2):
start_batch = time.time()
batch = valid_iter2.next()
if mb%5==0:
print("Minibatch valid {}: {} sec".format(mb, (time.time() -
start_batch)))

print("Epoch time: %s" % str(time.time() - start_epoch))
print("Training time: %s" % str(time.time() - start_training))

if __name__ == '__main__':
print "Loading the dataset 1 batch at a time"
test_6layers()
print "Success!"