diff --git a/dataset/Download_Cifar.py b/dataset/Download_Cifar.py
new file mode 100644
index 0000000..cd571ff
--- /dev/null
+++ b/dataset/Download_Cifar.py
@@ -0,0 +1,109 @@
+
+from ops.os_operation import mkdir
+import os
+from torchvision.datasets.utils import download_url, check_integrity
+import sys
+if sys.version_info[0] == 2:
+ import cPickle as pickle
+else:
+ import pickle
+import numpy as np
+
+class CIFAR10(object):
+ """`CIFAR10 `_ Dataset.
+ Args:
+        save_path (string): Root directory of the dataset, where the
+            ``cifar-10-batches-py`` folder exists or will be created.
+            The archive is downloaded and extracted automatically when the
+            batch files are missing or fail their MD5 check, and per-image
+            .npy files are then written under ``save_path/cifar10``.
+ """
+
+ def __init__(self, save_path):
+ self.root=save_path
+ self.download_init()
+ if not self._check_integrity():
+ mkdir(save_path)
+ self.download()
+ self.final_path=os.path.join(save_path,'cifar10')
+ mkdir(self.final_path)
+ #generate npy files here
+ self.train_path=os.path.join(self.final_path,'trainset')
+ self.test_path = os.path.join(self.final_path, 'testset')
+ mkdir(self.train_path)
+ mkdir(self.test_path)
+        # directory size is only a rough emptiness check: if the per-image
+        # .npy files were not generated yet, (re)process the pickled batches
+        if os.path.getsize(self.train_path) < 10000:
+            self.Process_Dataset(self.train_list, self.train_path)
+        if os.path.getsize(self.test_path) < 10000:
+            self.Process_Dataset(self.test_list, self.test_path)
+
+ def download_init(self):
+ self.base_folder = 'cifar-10-batches-py'
+ self.url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
+ self.filename = "cifar-10-python.tar.gz"
+ self.tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
+ self.train_list = [
+ ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
+ ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
+ ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
+ ['data_batch_4', '634d18415352ddfa80567beed471001a'],
+ ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
+ ]
+
+ self.test_list = [
+ ['test_batch', '40351d587109b95175f43aff81a1287e'],
+ ]
+
+ def download(self):
+ import tarfile
+
+ if self._check_integrity():
+ print('Files already downloaded and verified')
+ return
+
+ root = self.root
+ download_url(self.url, root, self.filename, self.tgz_md5)
+
+ # extract file
+ cwd = os.getcwd()
+ tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
+ os.chdir(root)
+ tar.extractall()
+ tar.close()
+ os.chdir(cwd)
+
+ def _check_integrity(self):
+
+ root = self.root
+ for fentry in (self.train_list + self.test_list):
+ filename, md5 = fentry[0], fentry[1]
+ fpath = os.path.join(root, self.base_folder, filename)
+ if not check_integrity(fpath, md5):
+ return False
+ return True
+
+ def Process_Dataset(self,train_list,train_path):
+ train_data=[]
+ train_labels=[]
+ for fentry in train_list:
+ f = fentry[0]
+ file = os.path.join(self.root, self.base_folder, f)
+ with open(file, 'rb') as fo:
+ if sys.version_info[0] == 2:
+ entry = pickle.load(fo)
+ else:
+ entry = pickle.load(fo, encoding='latin1')
+ train_data.append(entry['data'])
+ if 'labels' in entry:
+ train_labels += entry['labels']
+ else:
+ train_labels += entry['fine_labels']
+ train_data = np.concatenate(train_data)
+ train_data = train_data.reshape((len(train_data), 3, 32, 32))
+ train_labels=np.array(train_labels)
+ #following Channel,height,width format
+ #self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
+ for i in range(len(train_data)):
+ tmp_train_path=os.path.join(train_path,'trainset'+str(i)+'.npy')
+ tmp_aim_path = os.path.join(train_path, 'aimset' + str(i) + '.npy')
+ np.save(tmp_train_path,train_data[i])
+ np.save(tmp_aim_path,train_labels[i])
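+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative; the path and prints are not part of
+    # the original pipeline): download CIFAR-10 into ./data and materialize
+    # one .npy file per image and per label under ./data/cifar10/{trainset,testset}.
+    cifar = CIFAR10('./data')
+    print('train files in:', cifar.train_path)
+    print('test files in:', cifar.test_path)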
diff --git a/dataset/Projective_MixMatch_Dataloader.py b/dataset/Projective_MixMatch_Dataloader.py
new file mode 100644
index 0000000..b960e61
--- /dev/null
+++ b/dataset/Projective_MixMatch_Dataloader.py
@@ -0,0 +1,222 @@
+import torch
+import torch.utils.data as data
+import numpy as np
+import random
+import os
+from PIL import Image, PILLOW_VERSION
+import numbers
+from torchvision.transforms.functional import _get_inverse_affine_matrix
+import math
+from sklearn.model_selection import train_test_split
+from collections import defaultdict
+
+class TransformTwice:
+ def __init__(self, transform):
+ self.transform = transform
+
+ def __call__(self, inp):
+ out1 = self.transform(inp)
+ out2 = self.transform(inp)
+ return out1, out2
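+
+# Illustrative (hypothetical names): wrapping an augmentation so each unlabeled
+# image yields two independently augmented views, as MixMatch's label guessing
+# expects:
+#   twice = TransformTwice(some_transform)
+#   view1, view2 = twice(img)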
+
+class Projective_MixMatch_Data_Loader(data.Dataset):
+ def __init__(self, dataset_dir,shift=6, train_label=True, scale=None, resample=False,
+ fillcolor=0,matrix_transform=None,
+ transform_pre=None, transform=None, target_transform=None, rand_state=888,
+ valid_size=0.1,uniform_label=False,num_classes=10,unlabel_Data=False):
+ super(Projective_MixMatch_Data_Loader, self).__init__()
+ self.root=os.path.abspath(dataset_dir)
+ self.shift=shift
+ self.trainsetFile = []
+ self.aimsetFile = []
+ listfiles = os.listdir(dataset_dir)
+ self.trainlist = [os.path.join(dataset_dir, x) for x in listfiles if "trainset" in x]
+ self.aimlist = [os.path.join(dataset_dir, x) for x in listfiles if "aimset" in x]
+ self.trainlist.sort()
+ self.aimlist.sort()
+ self.train_label=train_label
+ self.valid_size=valid_size
+ self.unlabel_Data=unlabel_Data
+        # split into labeled/validation subsets according to valid_size
+ if valid_size>0:
+ if uniform_label==False:
+
+ X_train, X_test, y_train, y_test = train_test_split(self.trainlist, self.aimlist, test_size=valid_size,
+ random_state=rand_state)
+ if train_label:
+ self.trainlist = X_train
+ self.aimlist = y_train
+
+ else:
+ self.trainlist = X_test
+ self.aimlist = y_test
+ else:
+ #pick the uniform valid size indicated
+ shuffle_range=np.arange(len(self.trainlist))
+ random.seed(rand_state)
+ random.shuffle(shuffle_range)
+ require_size=int(len(self.aimlist)*valid_size/num_classes)
+ self.trainlist,self.aimlist=self.pick_top_k_example(require_size,shuffle_range,num_classes)
+ if uniform_label==True and len(self.trainlist)<50000:
+ #to accelerate training to avoid dataloader load again and again for small data
+ repeat_times=int(50000/len(self.trainlist))
+ self.trainlist=self.trainlist*repeat_times
+ self.aimlist=self.aimlist*repeat_times
+ self.transform_pre = transform_pre
+ if scale is not None:
+ assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
+ "scale should be a list or tuple and it must be of length 2."
+ for s in scale:
+ if s <= 0:
+ raise ValueError("scale values should be positive")
+ self.scale = scale
+ self.resample = resample
+ self.fillcolor = fillcolor
+ self.transform = transform
+ self.target_transform = target_transform
+ self.matrix_transform=matrix_transform
+ def pick_top_k_example(self,img_per_cat,shuffle_range,num_class):
+ record_dict=defaultdict(list)
+ for i in range(len(shuffle_range)):
+ tmp_id=shuffle_range[i]
+ label=int(np.load(self.aimlist[tmp_id]))
+ if label not in record_dict:
+ record_dict[label].append(tmp_id)
+            elif len(record_dict[label]) < img_per_cat:
+                record_dict[label].append(tmp_id)
+        # assemble the class-balanced labeled subset from the picked indices
+        picked_ids = []
+        for label in record_dict:
+            picked_ids += record_dict[label][:img_per_cat]
+        trainlist = [self.trainlist[i] for i in picked_ids]
+        aimlist = [self.aimlist[i] for i in picked_ids]
+        return trainlist, aimlist
diff --git a/models/Attention.py b/models/Attention.py
new file mode 100644
--- /dev/null
+++ b/models/Attention.py
@@ -0,0 +1,41 @@
+import torch
+import torch.nn as nn
+
+class Self_Attn(nn.Module):
+    """ Self-attention layer (SAGAN style) """
+    def __init__(self, in_dim, activation):
+        super(Self_Attn, self).__init__()
+        self.chanel_in = in_dim
+        self.activation = activation
+        # reduce the query/key embedding dimension when the channel count allows it
+        if in_dim >= 8:
+ self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1,bias=False)
+ self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1,bias=False)
+ else:
+ self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1, bias=False)
+ self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim , kernel_size=1, bias=False)
+ self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1,bias=False)
+ self.gamma = nn.Parameter(torch.zeros(1))
+
+        self.softmax = nn.Softmax(dim=-1)
+
+ def forward(self, x):
+ """
+ inputs :
+ x : input feature maps( B X C X W X H)
+ returns :
+ out : self attention value + input feature
+ attention: B X N X N (N is Width*Height)
+ """
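+        # e.g. for x of shape (B, C, 8, 8): N = 8*8 = 64, so attention is (B, 64, 64)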
+ m_batchsize, C, width, height = x.size()
+        proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1)  # B x N x C, with N = W*H
+        proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)  # B x C x N
+        energy = torch.bmm(proj_query, proj_key)  # B x N x N
+        attention = self.softmax(energy)  # row-normalized attention map, B x N x N
+        proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)  # B x C x N
+
+ out = torch.bmm(proj_value, attention.permute(0, 2, 1))
+ out = out.view(m_batchsize, C, width, height)
+
+ out = self.gamma * out + x
+ return out, attention
\ No newline at end of file
diff --git a/models/Classifier.py b/models/Classifier.py
new file mode 100644
index 0000000..33145c8
--- /dev/null
+++ b/models/Classifier.py
@@ -0,0 +1,24 @@
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from models.TE_Module import TE_Module
+class Classifier(nn.Module):
+ def __init__(self, _num_stages=3, _use_avg_on_conv3=True, indim=128, num_classes=10):
+ """
+        :param _num_stages: number of convolutional stages in the TE backbone
+        :param _use_avg_on_conv3: whether to finish conv3 with global average pooling
+        :param indim: feature dimension fed into the final linear layer
+        :param num_classes: number of output classes
+ """
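+        # Illustrative usage (hypothetical names):
+        #   clf = Classifier(num_classes=10)
+        #   logits = clf(images)  # images: (B, 3, 32, 32) -> logits: (B, 10)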
+ #nChannels = 192
+ super(Classifier, self).__init__()
+ self.clf = TE_Module(_num_stages=_num_stages, _use_avg_on_conv3=_use_avg_on_conv3)
+ self.fc = nn.Linear(indim, num_classes)
+ for m in self.modules():
+ if isinstance(m, nn.Linear):
+ m.bias.data.zero_()
+ #self.attention=Self_Attn(nChannels, 'relu')
+ def forward(self, x1):
+ x1,_ = self.clf(x1)
+ return self.fc(x1)
\ No newline at end of file
diff --git a/models/TEBlock.py b/models/TEBlock.py
new file mode 100644
index 0000000..53b98d6
--- /dev/null
+++ b/models/TEBlock.py
@@ -0,0 +1,39 @@
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class TEBlock(nn.Module):
+ def __init__(self, in_planes, out_planes, kernel_size):
+ super(TEBlock, self).__init__()
+        padding = int((kernel_size - 1) / 2)  # 'same' padding: output keeps the input spatial size
+ self.layers = nn.Sequential()
+ self.layers.add_module('Conv', nn.Conv2d(in_planes, out_planes, \
+ kernel_size=kernel_size, stride=1, padding=padding, bias=False))
+ self.layers.add_module('BatchNorm', nn.BatchNorm2d(out_planes))
+ self.layers.add_module('ReLU', nn.LeakyReLU(negative_slope=0.1,inplace=True))
+
+ def forward(self, x):
+ return self.layers(x)
+
+class TEBlock1(nn.Module):
+    """Like TEBlock, but with valid (unpadded) convolutions."""
+    def __init__(self, in_planes, out_planes, kernel_size):
+        super(TEBlock1, self).__init__()
+        padding = 0  # valid convolution: spatial size shrinks by kernel_size - 1
+ self.layers = nn.Sequential()
+ self.layers.add_module('Conv', nn.Conv2d(in_planes, out_planes, \
+ kernel_size=kernel_size, stride=1, padding=padding, bias=False))
+ self.layers.add_module('BatchNorm', nn.BatchNorm2d(out_planes))
+ self.layers.add_module('ReLU', nn.LeakyReLU(negative_slope=0.1,inplace=True))
+
+ def forward(self, x):
+ return self.layers(x)
+
+class GlobalAveragePooling(nn.Module):
+ def __init__(self):
+ super(GlobalAveragePooling, self).__init__()
+
+ def forward(self, feat):
+ num_channels = feat.size(1)
+ return F.avg_pool2d(feat, (feat.size(2), feat.size(3))).view(-1, num_channels)
+
+
diff --git a/models/TE_Module.py b/models/TE_Module.py
new file mode 100644
index 0000000..67724fa
--- /dev/null
+++ b/models/TE_Module.py
@@ -0,0 +1,131 @@
+import math
+import torch.nn as nn
+from models.TEBlock import TEBlock,TEBlock1,GlobalAveragePooling
+from models.Attention import Self_Attn
+
+class TE_Module(nn.Module):
+ """
+    reference: Temporal Ensembling for Semi-Supervised Learning
+ https://arxiv.org/pdf/1610.02242.pdf
+ """
+ def __init__(self, _num_inchannels=3, _num_stages=3, _use_avg_on_conv3=True,run_type=0):
+ super(TE_Module, self).__init__()
+ self.num_inchannels = _num_inchannels
+ self.num_stages = _num_stages
+ self.use_avg_on_conv3 = _use_avg_on_conv3
+
+ assert (self.num_stages >= 3)
+ nChannels = 128
+ nChannels1 = 256
+ nChannels2 = 512
+ count_stage=0
+
+ additional_stage=0
+ if run_type != 0:
+ additional_stage+=1
+ blocks = [nn.Sequential() for i in range(self.num_stages+additional_stage)]
+ # 1st block,kernel size 3,3,3
+ blocks[count_stage].add_module('Block1_ConvB1', TEBlock(self.num_inchannels, nChannels, 3))
+ blocks[count_stage].add_module('Block1_ConvB2', TEBlock(nChannels, nChannels, 3))
+ blocks[count_stage].add_module('Block1_ConvB3', TEBlock(nChannels, nChannels, 3))
+ blocks[count_stage].add_module('Block1_MaxPool', nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
+ blocks[count_stage].add_module('Block1_Dropout',nn.Dropout(p=0.5, inplace=True))
+ count_stage+=1
+ blocks[count_stage].add_module('Block2_ConvB1', TEBlock(nChannels, nChannels1, 3))
+ blocks[count_stage].add_module('Block2_ConvB2', TEBlock(nChannels1, nChannels1, 3))
+ blocks[count_stage].add_module('Block2_ConvB3', TEBlock(nChannels1, nChannels1, 3))
+ blocks[count_stage].add_module('Block2_MaxPool', nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
+ blocks[count_stage].add_module('Block2_Dropout', nn.Dropout(p=0.5, inplace=True))
+ count_stage+=1
+        if run_type in (1, 2, 3, 4, 8):
+ blocks[count_stage].add_module('Attention', Self_Attn(nChannels1, 'relu'))
+ count_stage+=1
+ blocks[count_stage].add_module('Block3_ConvB1', TEBlock1(nChannels1, nChannels2, 3))
+ blocks[count_stage].add_module('Block3_ConvB2', TEBlock1(nChannels2, nChannels1, 1))
+ blocks[count_stage].add_module('Block3_ConvB3', TEBlock1(nChannels1, nChannels, 1))
+ #add final average pooling
+ blocks.append(nn.Sequential())
+ blocks[-1].add_module('GlobalAveragePooling', GlobalAveragePooling())
+
+ self._feature_blocks = nn.ModuleList(blocks)
+ if run_type==0:
+            self.all_feat_names = ['conv' + str(s + 1) for s in range(self.num_stages)] + ['classifier', ]
+        elif run_type in (1, 2, 3, 4, 8):
+            self.all_feat_names = ['conv1', 'conv2', 'Attention'] + ['conv' + str(s + 1) for s in
+                                                                     range(2, self.num_stages)] + ['classifier', ]
+            # e.g. with num_stages=3: ['conv1', 'conv2', 'Attention', 'conv3', 'classifier']
+ assert (len(self.all_feat_names) == len(self._feature_blocks))
+ self.weight_initialization()
+ def _parse_out_keys_arg(self, out_feat_keys):
+ """
+        :param out_feat_keys: names of the output features to return
+        :return: the validated key list and the index of the last layer
+            needed to compute them
+ """
+
+ # By default return the features of the last layer / module.
+ out_feat_keys = [self.all_feat_names[-1], ] if out_feat_keys is None else out_feat_keys
+
+ if len(out_feat_keys) == 0:
+ raise ValueError('Empty list of output feature keys.')
+ for f, key in enumerate(out_feat_keys):
+ if key not in self.all_feat_names:
+ raise ValueError(
+ 'Feature with name {0} does not exist. Existing features: {1}.'.format(key, self.all_feat_names))
+ elif key in out_feat_keys[:f]:
+ raise ValueError('Duplicate output feature key: {0}.'.format(key))
+
+        # Find the highest output feature in `out_feat_keys`
+ max_out_feat = max([self.all_feat_names.index(key) for key in out_feat_keys])
+
+ return out_feat_keys, max_out_feat
+
+ def forward(self, x, out_feat_keys=None):
+ """Forward an image `x` through the network and return the asked output features.
+ Args:
+ x: input image.
+ out_feat_keys: a list/tuple with the feature names of the features
+ that the function should return. By default the last feature of
+ the network is returned.
+ Return:
+ out_feats: If multiple output features were asked then `out_feats`
+ is a list with the asked output features placed in the same
+ order as in `out_feat_keys`. If a single output feature was
+ asked then `out_feats` is that output feature (and not a list).
+ """
+ out_feat_keys, max_out_feat = self._parse_out_keys_arg(out_feat_keys)
+ out_feats = [None] * len(out_feat_keys)
+ go_attention_flag = False
+ feat = x
+ for f in range(max_out_feat + 1):
+ key = self.all_feat_names[f]
+ if key == 'Attention':
+ go_attention_flag = True
+ feat, attention = self._feature_blocks[f](feat)
+ else:
+ feat = self._feature_blocks[f](feat)
+ if key in out_feat_keys:
+ out_feats[out_feat_keys.index(key)] = feat
+
+ out_feats = out_feats[0] if len(out_feats) == 1 else out_feats
+ if go_attention_flag:
+ return out_feats, attention
+ else:
+ return out_feats, None
+
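+    # Illustrative (hypothetical names): requesting intermediate features by
+    # name returns them in the requested order:
+    #   feats, _ = te_module(x, out_feat_keys=['conv1', 'conv3'])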
+
+ def weight_initialization(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ if m.weight.requires_grad:
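+                    # He (Kaiming) initialization: std = sqrt(2 / fan_out),
+                    # with fan_out = kernel_h * kernel_w * out_channels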
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+            elif isinstance(m, nn.BatchNorm2d):  # identity init: scale 1, shift 0
+ if m.weight.requires_grad:
+ m.weight.data.fill_(1)
+ if m.bias.requires_grad:
+ m.bias.data.zero_()
+ elif isinstance(m, nn.Linear):
+ if m.bias.requires_grad:
+ m.bias.data.zero_()
\ No newline at end of file
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ops/__init__.py b/ops/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ops/os_operation.py b/ops/os_operation.py
new file mode 100644
index 0000000..680be05
--- /dev/null
+++ b/ops/os_operation.py
@@ -0,0 +1,17 @@
+import os
+def mkdir(path):
+ path=path.strip()
+ path=path.rstrip("\\")
+ isExists=os.path.exists(path)
+    if not isExists:
+        print(path + " created")
+        os.makedirs(path)
+        return True
+    else:
+        print(path + ' already exists')
+        return False
+def execCmd(cmd):
+ r = os.popen(cmd)
+ text = r.read()
+ r.close()
+ return text
\ No newline at end of file
diff --git a/train.py b/train.py
index d55d192..7d64702 100644
--- a/train.py
+++ b/train.py
@@ -17,15 +17,18 @@
import torchvision.transforms as transforms
import torch.nn.functional as F
-import models.wideresnet as models
+#import models.wideresnet as models
+from models.Classifier import Classifier
import dataset.cifar10 as dataset
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
+from models.wideresnet import WideResNet
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser(description='PyTorch MixMatch Training')
# Optimization options
parser.add_argument('--epochs', default=1024, type=int, metavar='N',
help='number of total epochs to run')
+
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch-size', default=64, type=int, metavar='N',
@@ -51,7 +54,7 @@
parser.add_argument('--lambda-u', default=75, type=float)
parser.add_argument('--T', default=0.5, type=float)
parser.add_argument('--ema-decay', default=0.999, type=float)
-
+parser.add_argument('--type', default=0, type=int, help='backbone choice: 0 = WideResNet-28-2, 1 = 13-layer TE module')
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
@@ -74,7 +77,7 @@ def main():
mkdir_p(args.out)
# Data
- print(f'==> Preparing cifar10')
+ print('==> Preparing cifar10')
transform_train = transforms.Compose([
dataset.RandomPadandCrop(32),
dataset.RandomFlip(),
@@ -95,7 +98,10 @@ def main():
print("==> creating WRN-28-2")
def create_model(ema=False):
- model = models.WideResNet(num_classes=10)
+    if args.type == 1:
+        model = Classifier(num_classes=10)
+    else:
+        model = WideResNet(num_classes=10)
model = model.cuda()
if ema:
@@ -114,7 +120,7 @@ def create_model(ema=False):
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
- ema_optimizer= WeightEMA(model, ema_model, alpha=args.ema_decay)
+    ema_optimizer = WeightEMA(model, ema_model, run_type=args.type, alpha=args.ema_decay)
start_epoch = 0
# Resume
@@ -208,6 +214,7 @@ def train(labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_opti
labeled_train_iter = iter(labeled_trainloader)
inputs_x, targets_x = labeled_train_iter.next()
+
try:
(inputs_u, inputs_u2), _ = unlabeled_train_iter.next()
except:
@@ -318,7 +325,7 @@ def validate(valloader, model, criterion, epoch, use_cuda, mode):
model.eval()
end = time.time()
- bar = Bar(f'{mode}', max=len(valloader))
+    bar = Bar(mode, max=len(valloader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valloader):
# measure data loading time
@@ -380,11 +387,14 @@ def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch):
return Lx, Lu, args.lambda_u * linear_rampup(epoch)
class WeightEMA(object):
- def __init__(self, model, ema_model, alpha=0.999):
+    def __init__(self, model, ema_model, run_type=0, alpha=0.999):
self.model = model
self.ema_model = ema_model
self.alpha = alpha
- self.tmp_model = models.WideResNet(num_classes=10).cuda()
+        if run_type == 1:
+            self.tmp_model = Classifier(num_classes=10).cuda()
+        else:
+            self.tmp_model = WideResNet(num_classes=10).cuda()
self.wd = 0.02 * args.lr
for param, ema_param in zip(self.model.parameters(), self.ema_model.parameters()):
diff --git a/train_myloader.py b/train_myloader.py
new file mode 100644
index 0000000..08adbb5
--- /dev/null
+++ b/train_myloader.py
@@ -0,0 +1,543 @@
+from __future__ import print_function
+
+import argparse
+import os
+import shutil
+import time
+import random
+
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.parallel
+import torch.backends.cudnn as cudnn
+import torch.optim as optim
+import torch.utils.data as data
+import torchvision.transforms as transforms
+import torch.nn.functional as F
+
+#import models.wideresnet as models
+from models.Classifier import Classifier
+import dataset.cifar10 as dataset
+from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
+from models.wideresnet import WideResNet
+from tensorboardX import SummaryWriter
+import PIL
+from dataset.Projective_MixMatch_Dataloader import Projective_MixMatch_Data_Loader
+from dataset.Download_Cifar import CIFAR10
+
+
+parser = argparse.ArgumentParser(description='PyTorch MixMatch Training')
+# Optimization options
+parser.add_argument('--epochs', default=1024, type=int, metavar='N',
+ help='number of total epochs to run')
+
+parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+ help='manual epoch number (useful on restarts)')
+parser.add_argument('--batch_size', default=64, type=int, metavar='N',
+ help='train batchsize')
+parser.add_argument('--lr', '--learning-rate', default=0.002, type=float,
+ metavar='LR', help='initial learning rate')
+# Checkpoints
+parser.add_argument('--resume', default='', type=str, metavar='PATH',
+ help='path to latest checkpoint (default: none)')
+# Miscs
+parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
+#Device options
+parser.add_argument('--gpu', default='0', type=str,
+ help='id(s) for CUDA_VISIBLE_DEVICES')
+#Method options
+parser.add_argument('--n-labeled', type=int, default=250,
+ help='Number of labeled data')
+parser.add_argument('--val-iteration', type=int, default=1024,
+                    help='Number of training iterations per epoch')
+parser.add_argument('--out', default='result',
+ help='Directory to output the result')
+parser.add_argument('--alpha', default=0.75, type=float)
+parser.add_argument('--lambda-u', default=75, type=float)
+parser.add_argument('--T', default=0.5, type=float)
+parser.add_argument('--ema-decay', default=0.999, type=float)
+parser.add_argument('--type', default=0, type=int, help='backbone choice: 0 = WideResNet-28-2, 1 = 13-layer TE module')
+parser.add_argument('--shrink', type=float, default=0.7, help='the lower bound of scaling')
+parser.add_argument('--enlarge', type=float, default=1.3, help='the higher bound of scaling')
+parser.add_argument('--shift', type=float, default=4, help='shift parameter (in pixels) for the projective transformation augmentation')
+parser.add_argument('--num_workers', type=int, default=16, help='number of data loading workers')
+parser.add_argument('--portion',type=float,default=0.08,help='percentage of data with labels')
+
+args = parser.parse_args()
+params = vars(args)
+state = {k: v for k, v in args._get_kwargs()}
+
+# Use CUDA
+os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+use_cuda = torch.cuda.is_available()
+
+# Random seed
+if args.manualSeed is None:
+ args.manualSeed = random.randint(1, 10000)
+np.random.seed(args.manualSeed)
+
+best_acc = 0 # best test accuracy
+
+
+def main():
+ global best_acc
+
+ if not os.path.isdir(args.out):
+ mkdir_p(args.out)
+
+ # Data
+ print('==> Preparing cifar10')
+ transform_train = transforms.Compose([
+ dataset.RandomPadandCrop(32),
+ dataset.RandomFlip(),
+ dataset.ToTensor(),
+ ])
+
+ transform_val = transforms.Compose([
+ dataset.ToTensor(),
+ ])
+
+ # train_labeled_set, train_unlabeled_set, val_set, test_set = dataset.get_cifar10('./data', args.n_labeled,
+ # transform_train=transform_train,
+ # transform_val=transform_val)
+ # labeled_trainloader = data.DataLoader(train_labeled_set, batch_size=args.batch_size, shuffle=True, num_workers=0,
+ # drop_last=True)
+ # unlabeled_trainloader = data.DataLoader(train_unlabeled_set, batch_size=args.batch_size, shuffle=True,
+ # num_workers=0, drop_last=True)
+ # val_loader = data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
+ # test_loader = data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
+    cifar_data = CIFAR10('./data')  # avoid shadowing the torch.utils.data import bound to `data`
+    train_dataloader, unlabel_dataloader, valid_dataloader, testloader = prepare_Dataloader(cifar_data, params)
+ # Model
+ print("==> creating WRN-28-2")
+
+ def create_model(ema=False):
+ if args.type == 1:
+ model = Classifier(num_classes=10)
+ else:
+ model = WideResNet(num_classes=10)
+ model = model.cuda()
+
+ if ema:
+ for param in model.parameters():
+ param.detach_()
+
+ return model
+
+ model = create_model()
+ ema_model = create_model(ema=True)
+
+ cudnn.benchmark = True
+ print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
+
+ train_criterion = SemiLoss()
+ criterion = nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=args.lr)
+
+ ema_optimizer = WeightEMA(model, ema_model, run_type=args.type, alpha=args.ema_decay)
+ start_epoch = 0
+
+ # Resume
+ title = 'noisy-cifar-10'
+ if args.resume:
+ # Load checkpoint.
+ print('==> Resuming from checkpoint..')
+ assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
+ args.out = os.path.dirname(args.resume)
+ checkpoint = torch.load(args.resume)
+ best_acc = checkpoint['best_acc']
+ start_epoch = checkpoint['epoch']
+ model.load_state_dict(checkpoint['state_dict'])
+ ema_model.load_state_dict(checkpoint['ema_state_dict'])
+ optimizer.load_state_dict(checkpoint['optimizer'])
+ logger = Logger(os.path.join(args.resume, 'log.txt'), title=title, resume=True)
+ else:
+ logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
+ logger.set_names(
+ ['Train Loss', 'Train Loss X', 'Train Loss U', 'Valid Loss', 'Valid Acc.', 'Test Loss', 'Test Acc.'])
+
+ writer = SummaryWriter(args.out)
+ step = 0
+ test_accs = []
+ # Train and val
+ for epoch in range(start_epoch, args.epochs):
+ print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
+
+ train_loss, train_loss_x, train_loss_u = train(train_dataloader, unlabel_dataloader, model, optimizer,
+ ema_optimizer, train_criterion, epoch, use_cuda)
+ _, train_acc = validate(train_dataloader, ema_model, criterion, epoch, use_cuda, mode='Train Stats')
+ val_loss, val_acc = validate(valid_dataloader, ema_model, criterion, epoch, use_cuda, mode='Valid Stats')
+ test_loss, test_acc = validate(testloader, ema_model, criterion, epoch, use_cuda, mode='Test Stats ')
+
+ step = args.batch_size * args.val_iteration * (epoch + 1)
+
+ writer.add_scalar('losses/train_loss', train_loss, step)
+ writer.add_scalar('losses/valid_loss', val_loss, step)
+ writer.add_scalar('losses/test_loss', test_loss, step)
+
+ writer.add_scalar('accuracy/train_acc', train_acc, step)
+ writer.add_scalar('accuracy/val_acc', val_acc, step)
+ writer.add_scalar('accuracy/test_acc', test_acc, step)
+
+ # scheduler.step()
+
+ # append logger file
+ logger.append([train_loss, train_loss_x, train_loss_u, val_loss, val_acc, test_loss, test_acc])
+
+ # save model
+ is_best = val_acc > best_acc
+ best_acc = max(val_acc, best_acc)
+ save_checkpoint({
+ 'epoch': epoch + 1,
+ 'state_dict': model.state_dict(),
+ 'ema_state_dict': ema_model.state_dict(),
+ 'acc': val_acc,
+ 'best_acc': best_acc,
+ 'optimizer': optimizer.state_dict(),
+ }, is_best)
+ test_accs.append(test_acc)
+ logger.close()
+ writer.close()
+
+ print('Best acc:')
+ print(best_acc)
+
+ print('Mean acc:')
+ print(np.mean(test_accs[-20:]))
+
+def prepare_Dataloader(data, params):
+ cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
+ cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
+ #dataset=torchvision.dataset.cifar10(params['F'], train=True, download=True)
+ #from ops.Transform_ops import RandomFlip,RandomPadandCrop
+ # transform_train = transforms.Compose([
+ # transforms.RandomCrop(32, padding=4),
+ # transforms.RandomHorizontalFlip(p=0.5),
+ # transforms.Normalize(cifar10_mean,cifar10_std),
+ # #dataset.ToTensor(),
+ # ])
+ # transform_train = transforms.Compose([
+ # transforms.RandomCrop(32, padding=4),
+ # transforms.RandomHorizontalFlip(),
+ # ]),
+ # transform_final=transforms.Compose([
+ #
+ # transforms.Normalize(cifar10_mean, cifar10_std),
+ # ])
+ #from Data_Processing.Projective_MixMatch_Dataloader import TransformTwice
+    # evaluation loader over the full (unlabeled) training set
+ valid_dataset = Projective_MixMatch_Data_Loader(dataset_dir=data.train_path, shift=params['shift'], train_label=True,
+ scale=(params['shrink'], params['enlarge']), fillcolor=(128, 128, 128),
+ resample=PIL.Image.BILINEAR,
+ matrix_transform=transforms.Compose([
+ transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.),
+ (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
+ ]),
+ transform_pre= transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ ]), rand_state=params['manualSeed'],
+ valid_size=0,
+ )
+ valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=params['batch_size'],
+ shuffle=True, num_workers=int(params['num_workers']))
+
+ unlabel_dataset = Projective_MixMatch_Data_Loader(dataset_dir=data.train_path, shift=params['shift'],
+ train_label=True,
+ scale=(params['shrink'], params['enlarge']),
+ fillcolor=(128, 128, 128),
+ resample=PIL.Image.BILINEAR,
+ matrix_transform=transforms.Compose([
+ transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.),
+ (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
+ ]),
+ transform_pre= transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ ]), rand_state=params['manualSeed'],
+ valid_size=0, unlabel_Data=True
+ )
+ unlabel_dataloader = torch.utils.data.DataLoader(unlabel_dataset, batch_size=params['batch_size'],
+ shuffle=True, num_workers=int(params['num_workers']),drop_last=True)
+ train_labeled_dataset = Projective_MixMatch_Data_Loader(dataset_dir=data.train_path, shift=params['shift'],
+ train_label=False,
+ scale=(params['shrink'], params['enlarge']),
+ fillcolor=(128, 128, 128), resample=PIL.Image.BILINEAR,
+ matrix_transform=transforms.Compose([
+ transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.),
+ (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
+ ]),
+ transform_pre= transforms.Compose([
+ transforms.RandomCrop(32, padding=4),
+ transforms.RandomHorizontalFlip(),
+ ]), rand_state=666, valid_size=params['portion'], uniform_label=True,
+ )
+ train_dataloader = torch.utils.data.DataLoader(train_labeled_dataset, batch_size=params['batch_size'],
+ shuffle=True, num_workers=int(params['num_workers']),drop_last=True)
+ test_dataset = Projective_MixMatch_Data_Loader(dataset_dir=data.test_path, shift=params['shift'], train_label=True,
+ scale=(params['shrink'], params['enlarge']), fillcolor=(128, 128, 128),
+ resample=PIL.Image.BILINEAR,
+ matrix_transform=transforms.Compose([
+ transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.),
+ (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
+ ]),
+ rand_state=666, valid_size=0,
+ )
+ testloader = torch.utils.data.DataLoader(test_dataset, batch_size=params['batch_size'], shuffle=False,
+ num_workers=int(params['num_workers']))
+    return train_dataloader, unlabel_dataloader, valid_dataloader, testloader
+
+
+def train(labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, criterion, epoch, use_cuda):
+ batch_time = AverageMeter()
+ data_time = AverageMeter()
+ losses = AverageMeter()
+ losses_x = AverageMeter()
+ losses_u = AverageMeter()
+ ws = AverageMeter()
+ end = time.time()
+
+ bar = Bar('Training', max=args.val_iteration)
+ labeled_train_iter = iter(labeled_trainloader)
+ unlabeled_train_iter = iter(unlabeled_trainloader)
+
+ model.train()
+ for batch_idx in range(args.val_iteration):
+        try:
+            inputs_x, _, _, targets_x = next(labeled_train_iter)
+        except StopIteration:
+            # restart the labeled loader once it is exhausted
+            labeled_train_iter = iter(labeled_trainloader)
+            inputs_x, _, _, targets_x = next(labeled_train_iter)
+
+        try:
+            (inputs_u, inputs_u2), _, _, _ = next(unlabeled_train_iter)
+        except StopIteration:
+            unlabeled_train_iter = iter(unlabeled_trainloader)
+            (inputs_u, inputs_u2), _, _, _ = next(unlabeled_train_iter)
+
+ # measure data loading time
+ data_time.update(time.time() - end)
+
+ batch_size = inputs_x.size(0)
+
+ # Transform label to one-hot
+ targets_x = torch.zeros(batch_size, 10).scatter_(1, targets_x.view(-1, 1), 1)
+
+ if use_cuda:
+ inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)
+ inputs_u = inputs_u.cuda()
+ inputs_u2 = inputs_u2.cuda()
+
+ with torch.no_grad():
+ # compute guessed labels of unlabel samples
+ outputs_u = model(inputs_u)
+ outputs_u2 = model(inputs_u2)
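+            # temperature sharpening: average the two predictions, raise to 1/T,
+            # and renormalize; with the default T = 0.5 this squares the
+            # probabilities, pushing guessed labels toward low entropy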
+ p = (torch.softmax(outputs_u, dim=1) + torch.softmax(outputs_u2, dim=1)) / 2
+ pt = p ** (1 / args.T)
+ targets_u = pt / pt.sum(dim=1, keepdim=True)
+ targets_u = targets_u.detach()
+
+ # mixup
+ all_inputs = torch.cat([inputs_x, inputs_u, inputs_u2], dim=0)
+ all_targets = torch.cat([targets_x, targets_u, targets_u], dim=0)
+
+ l = np.random.beta(args.alpha, args.alpha)
+
+ l = max(l, 1 - l)
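+        # keeping l >= 0.5 ensures each mixed sample stays closer to its first
+        # component; this is MixMatch's tweak to vanilla mixup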
+
+ idx = torch.randperm(all_inputs.size(0))
+
+ input_a, input_b = all_inputs, all_inputs[idx]
+ target_a, target_b = all_targets, all_targets[idx]
+
+ mixed_input = l * input_a + (1 - l) * input_b
+ mixed_target = l * target_a + (1 - l) * target_b
+
+        # interleave labeled and unlabeled samples between batches to get a correct batchnorm calculation
+ mixed_input = list(torch.split(mixed_input, batch_size))
+ mixed_input = interleave(mixed_input, batch_size)
+
+ logits = [model(mixed_input[0])]
+ for input in mixed_input[1:]:
+ logits.append(model(input))
+
+ # put interleaved samples back
+ logits = interleave(logits, batch_size)
+ logits_x = logits[0]
+ logits_u = torch.cat(logits[1:], dim=0)
+
+ Lx, Lu, w = criterion(logits_x, mixed_target[:batch_size], logits_u, mixed_target[batch_size:],
+ epoch + batch_idx / args.val_iteration)
+
+ loss = Lx + w * Lu
+
+ # record loss
+ losses.update(loss.item(), inputs_x.size(0))
+ losses_x.update(Lx.item(), inputs_x.size(0))
+ losses_u.update(Lu.item(), inputs_x.size(0))
+ ws.update(w, inputs_x.size(0))
+
+ # compute gradient and do SGD step
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+ ema_optimizer.step()
+
+ # measure elapsed time
+ batch_time.update(time.time() - end)
+ end = time.time()
+
+ # plot progress
+ bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Loss_x: {loss_x:.4f} | Loss_u: {loss_u:.4f} | W: {w:.4f}'.format(
+ batch=batch_idx + 1,
+ size=args.val_iteration,
+ data=data_time.avg,
+ bt=batch_time.avg,
+ total=bar.elapsed_td,
+ eta=bar.eta_td,
+ loss=losses.avg,
+ loss_x=losses_x.avg,
+ loss_u=losses_u.avg,
+ w=ws.avg,
+ )
+ bar.next()
+ bar.finish()
+
+ ema_optimizer.step(bn=True)
+
+ return (losses.avg, losses_x.avg, losses_u.avg,)
+
+
+def validate(valloader, model, criterion, epoch, use_cuda, mode):
+ batch_time = AverageMeter()
+ data_time = AverageMeter()
+ losses = AverageMeter()
+ top1 = AverageMeter()
+ top5 = AverageMeter()
+
+ # switch to evaluate mode
+ model.eval()
+
+ end = time.time()
+    bar = Bar(mode, max=len(valloader))
+ with torch.no_grad():
+ for batch_idx, (inputs,_,_, targets) in enumerate(valloader):
+ # measure data loading time
+ data_time.update(time.time() - end)
+
+ if use_cuda:
+ inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
+
+ # compute output
+ outputs = model(inputs)
+ loss = criterion(outputs, targets)
+
+ # measure accuracy and record loss
+ prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
+ losses.update(loss.item(), inputs.size(0))
+ top1.update(prec1.item(), inputs.size(0))
+ top5.update(prec5.item(), inputs.size(0))
+
+ # measure elapsed time
+ batch_time.update(time.time() - end)
+ end = time.time()
+
+ # plot progress
+ bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
+ batch=batch_idx + 1,
+ size=len(valloader),
+ data=data_time.avg,
+ bt=batch_time.avg,
+ total=bar.elapsed_td,
+ eta=bar.eta_td,
+ loss=losses.avg,
+ top1=top1.avg,
+ top5=top5.avg,
+ )
+ bar.next()
+ bar.finish()
+ return (losses.avg, top1.avg)
+
+
+def save_checkpoint(state, is_best, checkpoint=args.out, filename='checkpoint.pth.tar'):
+ filepath = os.path.join(checkpoint, filename)
+ torch.save(state, filepath)
+ if is_best:
+ shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
+
+
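+# ramps the unlabeled-loss weight linearly from 0 to 1 over the first
+# rampup_length epochs, so lambda_u only reaches full strength once the
+# guessed labels have had time to stabilize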
+def linear_rampup(current, rampup_length=16):
+ if rampup_length == 0:
+ return 1.0
+ else:
+ current = np.clip(current / rampup_length, 0.0, 1.0)
+ return float(current)
+
+
+class SemiLoss(object):
+ def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch):
+ probs_u = torch.softmax(outputs_u, dim=1)
+
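+        # Lx: cross-entropy between labeled predictions and their mixed soft targets;
+        # Lu: L2 consistency loss on the unlabeled probabilities, weighted by
+        # lambda_u * linear_rampup(epoch)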
+ Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))
+ Lu = torch.mean((probs_u - targets_u) ** 2)
+
+ return Lx, Lu, args.lambda_u * linear_rampup(epoch)
+
+
+class WeightEMA(object):
+ def __init__(self, model, ema_model, run_type=0, alpha=0.999):
+ self.model = model
+ self.ema_model = ema_model
+ self.alpha = alpha
+ if run_type == 1:
+ self.tmp_model = Classifier(num_classes=10).cuda()
+ else:
+ self.tmp_model = WideResNet(num_classes=10).cuda()
+ self.wd = 0.02 * args.lr
+
+ for param, ema_param in zip(self.model.parameters(), self.ema_model.parameters()):
+ ema_param.data.copy_(param.data)
+
+ def step(self, bn=False):
+ if bn:
+            # adopt the online model's BatchNorm buffers while keeping the EMA
+            # weights: stash the EMA params in tmp_model, load the full
+            # state_dict (which also copies BN running stats), then restore them
+ for ema_param, tmp_param in zip(self.ema_model.parameters(), self.tmp_model.parameters()):
+ tmp_param.data.copy_(ema_param.data.detach())
+
+ self.ema_model.load_state_dict(self.model.state_dict())
+
+ for ema_param, tmp_param in zip(self.ema_model.parameters(), self.tmp_model.parameters()):
+ ema_param.data.copy_(tmp_param.data.detach())
+ else:
+ one_minus_alpha = 1.0 - self.alpha
+ for param, ema_param in zip(self.model.parameters(), self.ema_model.parameters()):
+ ema_param.data.mul_(self.alpha)
+ ema_param.data.add_(param.data.detach() * one_minus_alpha)
+            # customized weight decay: shrink the raw weights slightly every step (decoupled from Adam)
+ param.data.mul_(1 - self.wd)
+
+
+def interleave_offsets(batch, nu):
+ groups = [batch // (nu + 1)] * (nu + 1)
+ for x in range(batch - sum(groups)):
+ groups[-x - 1] += 1
+ offsets = [0]
+ for g in groups:
+ offsets.append(offsets[-1] + g)
+ assert offsets[-1] == batch
+ return offsets
+
+
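+# interleave swaps slices between the labeled batch and the unlabeled batches so
+# that every model() call in train() sees a mix of labeled and unlabeled samples,
+# keeping BatchNorm statistics consistent across the separate forward passes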
+def interleave(xy, batch):
+ nu = len(xy) - 1
+ offsets = interleave_offsets(batch, nu)
+ xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]
+ for i in range(1, nu + 1):
+ xy[0][i], xy[i][i] = xy[i][i], xy[0][i]
+ return [torch.cat(v, dim=0) for v in xy]
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/utils/__init__.py b/utils/__init__.py
index 7f0bae2..51c7cb3 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -6,5 +6,4 @@
# progress bar
import os, sys
-sys.path.append(os.path.join(os.path.dirname(__file__), "progress"))
-from progress.bar import Bar as Bar
\ No newline at end of file
+from utils.progress.progress.bar import Bar as Bar
\ No newline at end of file
diff --git a/utils/progress/LICENSE b/utils/progress/LICENSE
new file mode 100644
index 0000000..059cc05
--- /dev/null
+++ b/utils/progress/LICENSE
@@ -0,0 +1,13 @@
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/utils/progress/MANIFEST.in b/utils/progress/MANIFEST.in
new file mode 100644
index 0000000..0c73842
--- /dev/null
+++ b/utils/progress/MANIFEST.in
@@ -0,0 +1 @@
+include README.rst LICENSE
diff --git a/utils/progress/README.rst b/utils/progress/README.rst
new file mode 100644
index 0000000..3f3be76
--- /dev/null
+++ b/utils/progress/README.rst
@@ -0,0 +1,131 @@
+Easy progress reporting for Python
+==================================
+
+|pypi|
+
+|demo|
+
+.. |pypi| image:: https://img.shields.io/pypi/v/progress.svg
+.. |demo| image:: https://raw.github.com/verigak/progress/master/demo.gif
+ :alt: Demo
+
+Bars
+----
+
+There are 7 progress bars to choose from:
+
+- ``Bar``
+- ``ChargingBar``
+- ``FillingSquaresBar``
+- ``FillingCirclesBar``
+- ``IncrementalBar``
+- ``PixelBar``
+- ``ShadyBar``
+
+To use them, just call ``next`` to advance and ``finish`` to finish:
+
+.. code-block:: python
+
+ from progress.bar import Bar
+
+ bar = Bar('Processing', max=20)
+ for i in range(20):
+ # Do some work
+ bar.next()
+ bar.finish()
+
+The result will be a bar like the following: ::
+
+ Processing |############# | 42/100
+
+To simplify the common case where the work is done in an iterator, you can
+use the ``iter`` method:
+
+.. code-block:: python
+
+ for i in Bar('Processing').iter(it):
+ # Do some work
+
+Progress bars are very customizable: you can change their width, their fill
+character, their suffix, and more:
+
+.. code-block:: python
+
+ bar = Bar('Loading', fill='@', suffix='%(percent)d%%')
+
+This will produce a bar like the following: ::
+
+ Loading |@@@@@@@@@@@@@ | 42%
+
+You can use a number of template arguments in ``message`` and ``suffix``:
+
+========== ========================================================
+Name       Value
+========== ========================================================
+index      current value
+max        maximum value
+remaining  max - index
+progress   index / max
+percent    progress * 100
+avg        simple moving average time per item (in seconds)
+elapsed    elapsed time in seconds
+elapsed_td elapsed as a timedelta (useful for printing as a string)
+eta        avg * remaining
+eta_td     eta as a timedelta (useful for printing as a string)
+========== ========================================================
+
+Instead of passing all configuration options on instantiation, you can create
+your custom subclass:
+
+.. code-block:: python
+
+ class FancyBar(Bar):
+ message = 'Loading'
+ fill = '*'
+ suffix = '%(percent).1f%% - %(eta)ds'
+
+You can also override any of the arguments or create your own:
+
+.. code-block:: python
+
+ class SlowBar(Bar):
+ suffix = '%(remaining_hours)d hours remaining'
+ @property
+ def remaining_hours(self):
+ return self.eta // 3600
+
+
+Spinners
+========
+
+For actions with an unknown number of steps you can use a spinner:
+
+.. code-block:: python
+
+ from progress.spinner import Spinner
+
+ spinner = Spinner('Loading ')
+ while state != 'FINISHED':
+ # Do some work
+ spinner.next()
+
+There are 5 predefined spinners:
+
+- ``Spinner``
+- ``PieSpinner``
+- ``MoonSpinner``
+- ``LineSpinner``
+- ``PixelSpinner``
+
+
+Other
+=====
+
+There are a number of other classes available too, please check the source or
+subclass one of them to create your own.
+
+
+License
+=======
+
+progress is licensed under ISC
diff --git a/utils/progress/__init__.py b/utils/progress/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/utils/progress/progress/__init__.py b/utils/progress/progress/__init__.py
new file mode 100644
index 0000000..09dfc1e
--- /dev/null
+++ b/utils/progress/progress/__init__.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import division
+
+from collections import deque
+from datetime import timedelta
+from math import ceil
+from sys import stderr
+from time import time
+
+
+__version__ = '1.3'
+
+
+class Infinite(object):
+ file = stderr
+ sma_window = 10 # Simple Moving Average window
+
+ def __init__(self, *args, **kwargs):
+ self.index = 0
+ self.start_ts = time()
+ self.avg = 0
+ self._ts = self.start_ts
+ self._xput = deque(maxlen=self.sma_window)
+ for key, val in kwargs.items():
+ setattr(self, key, val)
+
+ def __getitem__(self, key):
+ if key.startswith('_'):
+ return None
+ return getattr(self, key, None)
+
+ @property
+ def elapsed(self):
+ return int(time() - self.start_ts)
+
+ @property
+ def elapsed_td(self):
+ return timedelta(seconds=self.elapsed)
+
+ def update_avg(self, n, dt):
+ if n > 0:
+ self._xput.append(dt / n)
+ self.avg = sum(self._xput) / len(self._xput)
+
+ def update(self):
+ pass
+
+ def start(self):
+ pass
+
+ def finish(self):
+ pass
+
+ def next(self, n=1):
+ now = time()
+ dt = now - self._ts
+ self.update_avg(n, dt)
+ self._ts = now
+ self.index = self.index + n
+ self.update()
+
+ def iter(self, it):
+ try:
+ for x in it:
+ yield x
+ self.next()
+ finally:
+ self.finish()
+
+
+class Progress(Infinite):
+ def __init__(self, *args, **kwargs):
+ super(Progress, self).__init__(*args, **kwargs)
+ self.max = kwargs.get('max', 100)
+
+ @property
+ def eta(self):
+ return int(ceil(self.avg * self.remaining))
+
+ @property
+ def eta_td(self):
+ return timedelta(seconds=self.eta)
+
+ @property
+ def percent(self):
+ return self.progress * 100
+
+ @property
+ def progress(self):
+ return min(1, self.index / self.max)
+
+ @property
+ def remaining(self):
+ return max(self.max - self.index, 0)
+
+ def start(self):
+ self.update()
+
+ def goto(self, index):
+ incr = index - self.index
+ self.next(incr)
+
+ def iter(self, it):
+ try:
+ self.max = len(it)
+ except TypeError:
+ pass
+
+ try:
+ for x in it:
+ yield x
+ self.next()
+ finally:
+ self.finish()
diff --git a/utils/progress/progress/__init__.pyc b/utils/progress/progress/__init__.pyc
new file mode 100644
index 0000000..352231f
Binary files /dev/null and b/utils/progress/progress/__init__.pyc differ
diff --git a/utils/progress/progress/bar.py b/utils/progress/progress/bar.py
new file mode 100644
index 0000000..5ee968f
--- /dev/null
+++ b/utils/progress/progress/bar.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+from . import Progress
+from .helpers import WritelnMixin
+
+
+class Bar(WritelnMixin, Progress):
+ width = 32
+ message = ''
+ suffix = '%(index)d/%(max)d'
+ bar_prefix = ' |'
+ bar_suffix = '| '
+ empty_fill = ' '
+ fill = '#'
+ hide_cursor = True
+
+ def update(self):
+ filled_length = int(self.width * self.progress)
+ empty_length = self.width - filled_length
+
+ message = self.message % self
+ bar = self.fill * filled_length
+ empty = self.empty_fill * empty_length
+ suffix = self.suffix % self
+ line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
+ suffix])
+ self.writeln(line)
+
+
+class ChargingBar(Bar):
+ suffix = '%(percent)d%%'
+ bar_prefix = ' '
+ bar_suffix = ' '
+ empty_fill = '∙'
+ fill = '█'
+
+
+class FillingSquaresBar(ChargingBar):
+ empty_fill = '▢'
+ fill = '▣'
+
+
+class FillingCirclesBar(ChargingBar):
+ empty_fill = '◯'
+ fill = '◉'
+
+
+class IncrementalBar(Bar):
+ phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
+
+ def update(self):
+ nphases = len(self.phases)
+ filled_len = self.width * self.progress
+ nfull = int(filled_len) # Number of full chars
+ phase = int((filled_len - nfull) * nphases) # Phase of last char
+ nempty = self.width - nfull # Number of empty chars
+
+ message = self.message % self
+ bar = self.phases[-1] * nfull
+ current = self.phases[phase] if phase > 0 else ''
+ empty = self.empty_fill * max(0, nempty - len(current))
+ suffix = self.suffix % self
+ line = ''.join([message, self.bar_prefix, bar, current, empty,
+ self.bar_suffix, suffix])
+ self.writeln(line)
+
+
+class PixelBar(IncrementalBar):
+ phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿')
+
+
+class ShadyBar(IncrementalBar):
+ phases = (' ', '░', '▒', '▓', '█')
diff --git a/utils/progress/progress/bar.pyc b/utils/progress/progress/bar.pyc
new file mode 100644
index 0000000..7be163f
Binary files /dev/null and b/utils/progress/progress/bar.pyc differ
diff --git a/utils/progress/progress/counter.py b/utils/progress/progress/counter.py
new file mode 100644
index 0000000..6b45a1e
--- /dev/null
+++ b/utils/progress/progress/counter.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+from . import Infinite, Progress
+from .helpers import WriteMixin
+
+
+class Counter(WriteMixin, Infinite):
+ message = ''
+ hide_cursor = True
+
+ def update(self):
+ self.write(str(self.index))
+
+
+class Countdown(WriteMixin, Progress):
+ hide_cursor = True
+
+ def update(self):
+ self.write(str(self.remaining))
+
+
+class Stack(WriteMixin, Progress):
+ phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
+ hide_cursor = True
+
+ def update(self):
+ nphases = len(self.phases)
+ i = min(nphases - 1, int(self.progress * nphases))
+ self.write(self.phases[i])
+
+
+class Pie(Stack):
+ phases = ('○', '◔', '◑', '◕', '●')
diff --git a/utils/progress/progress/helpers.py b/utils/progress/progress/helpers.py
new file mode 100644
index 0000000..9ed90b2
--- /dev/null
+++ b/utils/progress/progress/helpers.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import print_function
+
+
+HIDE_CURSOR = '\x1b[?25l'
+SHOW_CURSOR = '\x1b[?25h'
+
+
+class WriteMixin(object):
+ hide_cursor = False
+
+ def __init__(self, message=None, **kwargs):
+ super(WriteMixin, self).__init__(**kwargs)
+ self._width = 0
+ if message:
+ self.message = message
+
+ if self.file.isatty():
+ if self.hide_cursor:
+ print(HIDE_CURSOR, end='', file=self.file)
+ print(self.message, end='', file=self.file)
+ self.file.flush()
+
+ def write(self, s):
+ if self.file.isatty():
+ b = '\b' * self._width
+ c = s.ljust(self._width)
+ print(b + c, end='', file=self.file)
+ self._width = max(self._width, len(s))
+ self.file.flush()
+
+ def finish(self):
+ if self.file.isatty() and self.hide_cursor:
+ print(SHOW_CURSOR, end='', file=self.file)
+
+
+class WritelnMixin(object):
+ hide_cursor = False
+
+ def __init__(self, message=None, **kwargs):
+ super(WritelnMixin, self).__init__(**kwargs)
+ if message:
+ self.message = message
+
+ if self.file.isatty() and self.hide_cursor:
+ print(HIDE_CURSOR, end='', file=self.file)
+
+ def clearln(self):
+ if self.file.isatty():
+ print('\r\x1b[K', end='', file=self.file)
+
+ def writeln(self, line):
+ if self.file.isatty():
+ self.clearln()
+ print(line, end='', file=self.file)
+ self.file.flush()
+
+ def finish(self):
+ if self.file.isatty():
+ print(file=self.file)
+ if self.hide_cursor:
+ print(SHOW_CURSOR, end='', file=self.file)
+
+
+from signal import signal, SIGINT
+from sys import exit
+
+
+class SigIntMixin(object):
+ """Registers a signal handler that calls finish on SIGINT"""
+
+ def __init__(self, *args, **kwargs):
+ super(SigIntMixin, self).__init__(*args, **kwargs)
+ signal(SIGINT, self._sigint_handler)
+
+ def _sigint_handler(self, signum, frame):
+ self.finish()
+ exit(0)
diff --git a/utils/progress/progress/helpers.pyc b/utils/progress/progress/helpers.pyc
new file mode 100644
index 0000000..edd5660
Binary files /dev/null and b/utils/progress/progress/helpers.pyc differ
diff --git a/utils/progress/progress/spinner.py b/utils/progress/progress/spinner.py
new file mode 100644
index 0000000..464c7b2
--- /dev/null
+++ b/utils/progress/progress/spinner.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+from . import Infinite
+from .helpers import WriteMixin
+
+
+class Spinner(WriteMixin, Infinite):
+ message = ''
+ phases = ('-', '\\', '|', '/')
+ hide_cursor = True
+
+ def update(self):
+ i = self.index % len(self.phases)
+ self.write(self.phases[i])
+
+
+class PieSpinner(Spinner):
+ phases = ['◷', '◶', '◵', '◴']
+
+
+class MoonSpinner(Spinner):
+ phases = ['◑', '◒', '◐', '◓']
+
+
+class LineSpinner(Spinner):
+ phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
+
+class PixelSpinner(Spinner):
+ phases = ['⣾','⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽']
diff --git a/utils/progress/setup.py b/utils/progress/setup.py
new file mode 100644
index 0000000..c877781
--- /dev/null
+++ b/utils/progress/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+import progress
+
+
+setup(
+ name='progress',
+ version=progress.__version__,
+ description='Easy to use progress bars',
+ long_description=open('README.rst').read(),
+ author='Giorgos Verigakis',
+ author_email='verigak@gmail.com',
+ url='http://github.com/verigak/progress/',
+ license='ISC',
+ packages=['progress'],
+ classifiers=[
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: ISC License (ISCL)',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ ]
+)
diff --git a/utils/progress/test_progress.py b/utils/progress/test_progress.py
new file mode 100644
index 0000000..0f68b01
--- /dev/null
+++ b/utils/progress/test_progress.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import random
+import time
+
+from progress.bar import (Bar, ChargingBar, FillingSquaresBar,
+ FillingCirclesBar, IncrementalBar, PixelBar,
+ ShadyBar)
+from progress.spinner import (Spinner, PieSpinner, MoonSpinner, LineSpinner,
+ PixelSpinner)
+from progress.counter import Counter, Countdown, Stack, Pie
+
+
+def sleep():
+ t = 0.01
+ t += t * random.uniform(-0.1, 0.1) # Add some variance
+ time.sleep(t)
+
+
+for bar_cls in (Bar, ChargingBar, FillingSquaresBar, FillingCirclesBar):
+ suffix = '%(index)d/%(max)d [%(elapsed)d / %(eta)d / %(eta_td)s]'
+ bar = bar_cls(bar_cls.__name__, suffix=suffix)
+ for i in bar.iter(range(200)):
+ sleep()
+
+for bar_cls in (IncrementalBar, PixelBar, ShadyBar):
+ suffix = '%(percent)d%% [%(elapsed_td)s / %(eta)d / %(eta_td)s]'
+ bar = bar_cls(bar_cls.__name__, suffix=suffix)
+ for i in bar.iter(range(200)):
+ sleep()
+
+for spin in (Spinner, PieSpinner, MoonSpinner, LineSpinner, PixelSpinner):
+ for i in spin(spin.__name__ + ' ').iter(range(100)):
+ sleep()
+ print()
+
+for singleton in (Counter, Countdown, Stack, Pie):
+ for i in singleton(singleton.__name__ + ' ').iter(range(100)):
+ sleep()
+ print()
+
+bar = IncrementalBar('Random', suffix='%(index)d')
+for i in range(100):
+ bar.goto(random.randint(0, 100))
+ sleep()
+bar.finish()