api docs complete
achaiah committed May 8, 2019
1 parent 8e0b32f commit a238a11
Showing 22 changed files with 117 additions and 384 deletions.
16 changes: 1 addition & 15 deletions LICENSE.txt
@@ -1,22 +1,8 @@
COPYRIGHT

Some contributions by Nicholas Cullen:
Copyright (c) 2017, Nicholas Cullen:
Copyright (c) 2019, Achaiah.
All rights reserved.

Some contributions by François Chollet:
Copyright (c) 2015, François Chollet.
All rights reserved.

Some contributions by Google:
Copyright (c) 2015, Google, Inc.
All rights reserved.

All other contributions:
Copyright (c) 2015, the respective contributors.
All rights reserved.


LICENSE

The MIT License (MIT)
2 changes: 1 addition & 1 deletion README.md
@@ -31,7 +31,7 @@ work in progress though so apologies for anything that's broken.
## Install
Substitute version number as necessary:

`pip install git+https://github.com/achaiah/pywick.git@v0.5.2`
`pip install git+https://github.com/achaiah/pywick.git@v0.5.3`

## ModuleTrainer
The `ModuleTrainer` class provides a high-level training interface which abstracts
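For orientation, here is a minimal sketch of the Keras-style workflow that `ModuleTrainer` is described as providing. The import path and the `compile`/`fit` method names and argument names are assumptions based on the README's description, not verified against this release; the data and model are throwaway placeholders.

```python
import torch
import torch.nn as nn
from pywick.modules import ModuleTrainer  # import path assumed

# Toy data and model standing in for a real dataset/network.
x_train = torch.randn(512, 784)
y_train = torch.randint(0, 10, (512,))
model = nn.Sequential(nn.Linear(784, 10), nn.LogSoftmax(dim=1))

trainer = ModuleTrainer(model)
# Configure loss/optimizer once, Keras-style (method and argument names assumed).
trainer.compile(loss='nll_loss', optimizer='adadelta')
# Drive the training loop through a single fit() call (signature assumed).
trainer.fit(x_train, y_train, num_epoch=5, batch_size=128, verbose=1)
```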
3 changes: 3 additions & 0 deletions docs/source/classification_guide.md
@@ -0,0 +1,3 @@
## Classification

Our approach is pretty straightforward.
2 changes: 0 additions & 2 deletions docs/source/classification_guide.rst

This file was deleted.

7 changes: 4 additions & 3 deletions docs/source/conf.py
@@ -23,8 +23,8 @@
copyright = u'2019, Achaiah'
author = u'Achaiah'

# UNCOMMENT to generate local documentation
'''
# UNCOMMENT to generate local documentation
version_file = '../../pywick/__init__.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
@@ -34,13 +34,14 @@
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
'''
'''
# UNCOMMENT to generate readthedocs.io documentation
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.5.2'
release = '0.5.3'


# -- General configuration ---------------------------------------------------

2 changes: 2 additions & 0 deletions docs/source/segmentation_guide.md
@@ -0,0 +1,2 @@
## Segmentation

2 changes: 0 additions & 2 deletions docs/source/segmentation_guide.rst

This file was deleted.

2 changes: 1 addition & 1 deletion pywick/__init__.py
@@ -1,4 +1,4 @@
__version__ = '0.5.2'
__version__ = '0.5.3'
__author__ = 'Achaiah'
__description__ = 'High-level batteries-included neural network training library for Pytorch'

22 changes: 16 additions & 6 deletions pywick/data_stats.py
@@ -3,7 +3,6 @@
import os.path
import argparse

from .functions.meanstd import get_dataset_mean_std
from .datasets.FolderDataset import FolderDataset, rgb_image_loader

opt = dict()
@@ -22,7 +21,17 @@
# print('removing: ', rem)
opt.pop(rem)

def create_dataset_stats(data_path, output_path=None):
dataset_mean_std = {
'imagenet': ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
'general': ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
}


def get_dataset_mean_std(dataset_name='imagenet'):
return dataset_mean_std[dataset_name]


def create_dataset_stats(data_path, output_path=None, verbose=False):
'''
Generates statistics for the given dataset and writes them to a JSON file. Expects the data to be in the following dir structure:
dataroot
@@ -47,10 +56,11 @@ def create_dataset_stats(data_path, output_path=None):
mean, std = get_dataset_mean_std(dataset, img_size=256)
stats['mean'], stats['std'] = mean.tolist(), std.tolist() # convert from numpy array to python

print('------- Dataset Stats --------')
print(stats)
print('Written to: ', output_path)
print('------ End Dataset Stats ------')
if verbose:
print('------- Dataset Stats --------')
print(stats)
print('Written to: ', output_path)
print('------ End Dataset Stats ------')

with open(output_path, 'a') as statsfile:
json.dump(stats, statsfile)
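As a quick illustration of how these additions to `data_stats.py` are typically consumed, the sketch below feeds the preset statistics into a normalization transform and writes stats for a custom dataset. The directory paths are placeholders, and the example assumes the `'imagenet'` entry carries both a mean and a std, as shown above.

```python
from torchvision import transforms
from pywick.data_stats import get_dataset_mean_std, create_dataset_stats

# Preset per-channel statistics ('imagenet' or 'general').
mean, std = get_dataset_mean_std('imagenet')
preprocess = transforms.Compose([
    transforms.ToTensor(),            # scales pixel values to [0, 1]
    transforms.Normalize(mean, std),  # then centers them per channel
])

# Compute and persist stats for a custom dataset (paths are placeholders).
create_dataset_stats('/data/my_dataset',
                     output_path='/data/my_dataset/stats.json',
                     verbose=True)
```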
2 changes: 1 addition & 1 deletion pywick/datasets/__init__.py
@@ -11,4 +11,4 @@
"""

from . import BaseDataset, ClonedFolderDataset, CSVDataset, FolderDataset, PredictFolderDataset, UsefulDataset, data_utils
from .tnt import *
from .tnt import *
62 changes: 62 additions & 0 deletions pywick/datasets/data_utils.py
@@ -5,6 +5,7 @@
import warnings

import numpy as np
from tqdm import tqdm

try:
from PIL import Image
@@ -44,16 +45,19 @@ def pil_loader(path, color_space=''):


def pil_loader_rgb(path):
"""Convenience loader for RGB files (e.g. `.jpg`)"""
with open(path, 'rb', 0) as f:
return Image.open(f).convert('RGB')


def pil_loader_bw(path):
"""Convenience loader for B/W files (e.g. `.png with only one color chanel`)"""
with open(path, 'rb', 0) as f:
return Image.open(f).convert('L')


def npy_loader(path, color_space=None): # color space is unused here
"""Convenience loader for numeric files (e.g. arrays of numbers)"""
return np.load(path)


@@ -89,6 +93,7 @@ def _process_transform_argument(tform, num_inputs):
tform = [tform] * num_inputs
return tform


def _process_co_transform_argument(tform, num_inputs, num_targets):
tform = tform if tform is not None else _multi_arg_pass_through
if is_tuple_or_list(tform):
@@ -103,12 +108,15 @@ def _process_co_transform_argument(tform, num_inputs, num_targets):
def _return_first_element_of_list(x):
return x[0]


def _pass_through(x):
return x


def _multi_arg_pass_through(*x):
return x


def _find_classes(dirs):
classes = list()
for dir in dirs:
@@ -122,9 +130,11 @@ def _find_classes(dirs):
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx


def _is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def _finds_inputs_and_targets(root, class_mode, class_to_idx=None, input_regex='*',
rel_target_root='', target_prefix='', target_postfix='', target_extension='png',
splitRatio=1.0, random_seed=None, exclusion_file=None):
@@ -219,3 +229,55 @@ def _finds_inputs_and_targets(root, class_mode, class_to_idx=None, input_regex='
assert len(trainlist_inputs) == len(trainlist_targets) and len(vallist_inputs) == len(vallist_targets)
print("Total processed: %i Train-list: %i items Val-list: %i items Exclusion-list: %i items" % (icount, len(trainlist_inputs), len(vallist_inputs), len(exclusion_list)))
return list(zip(trainlist_inputs, trainlist_targets)), list(zip(vallist_inputs, vallist_targets))


def get_dataset_mean_std(data_set, img_size=256, output_div=255.0):
"""
Computes channel-wise mean and std of the dataset. The process is memory-intensive as the entire dataset must fit into memory.
Therefore, each image is scaled down to img_size first (default: 256).
Assumptions:
1. dataset uses PIL to read images
2. Images are in RGB format.
:param data_set: (pytorch Dataset)
:param img_size: (int):
scale of images at which to compute mean/std (default: 256)
:param output_div: (float `{1.0, 255.0}`):
Image values are naturally in 0-255 value range so the returned output is divided by output_div. For example, if output_div = 255.0 then mean/std will be in 0-1 range.
:return: (mean, std) as per-channel values ([r,g,b], [r,g,b])
"""

total = np.zeros((3, (len(data_set) * img_size * img_size)), dtype=int)
position = 0 # keep track of position in the total array

for src, _ in tqdm(data_set, ascii=True, desc="Process", unit='images'):
src = src.resize((img_size, img_size)) # resize to same size
src = np.array(src)

# reshape into correct shape
src = src.reshape(img_size * img_size, 3)
src = src.swapaxes(1,0)

# np.concatenate((a, b, c), axis=1) # NOPE NOPE NOPE -- makes a memory re-allocation for every concatenate operation

# -- In-place value substitution -- #
place = img_size * img_size * position
total[0:src.shape[0], place:place+src.shape[1]] = src # copies the src data into the total position at specified index

position = position+1

return total.mean(1) / output_div, total.std(1) / output_div # return channel-wise mean for the entire dataset


if __name__ == "__main__":
from pywick.datasets.FolderDataset import FolderDataset
from pywick.datasets.data_utils import pil_loader_rgb

dataset = FolderDataset(root='/home/users/youruser/images', class_mode='label', default_loader=pil_loader_rgb)
mean, std = get_dataset_mean_std(dataset)
print('----- RESULT -----')
print('mean: {}'.format(mean))
print('std: {}'.format(std))
print('----- DONE ------')
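The docstring above notes that this implementation is memory-bound because every resized image is copied into one large array. Purely to illustrate the trade-off (this is not part of pywick), here is an incremental variant that keeps only per-channel running sums; it assumes the same PIL/RGB conventions and `(image, target)` dataset items as `get_dataset_mean_std`.

```python
import numpy as np

def running_mean_std(data_set, img_size=256, output_div=255.0):
    """Channel-wise mean/std computed incrementally, without holding the dataset in memory."""
    n = 0                 # total pixel count
    s = np.zeros(3)       # running per-channel sum
    sq = np.zeros(3)      # running per-channel sum of squares
    for src, _ in data_set:
        px = np.asarray(src.resize((img_size, img_size)), dtype=np.float64).reshape(-1, 3)
        n += px.shape[0]
        s += px.sum(axis=0)
        sq += (px ** 2).sum(axis=0)
    mean = s / n
    std = np.sqrt(sq / n - mean ** 2)   # population std, matching numpy's default
    return mean / output_div, std / output_div
```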
49 changes: 0 additions & 49 deletions pywick/meanstd.py

This file was deleted.

3 changes: 1 addition & 2 deletions pywick/models/segmentation/testnets/__init__.py
@@ -13,8 +13,7 @@
from .linknext import LinkNext as TEST_Linknext
from .ocnet import OCNet as TEST_OCNet, asp_ocnet_resnet101 as TEST_OCNet_ASP_Res101, base_ocnet_resnet101 as TEST_OCNet_Base_Res101, pyramid_ocnet_resnet101 as TEST_OCNet_Pyr_Res101
from .ocnet import asp_ocnet_resnet152 as TEST_OCNet_ASP_Res152, base_ocnet_resnet152 as TEST_OCNet_Base_Res152, pyramid_ocnet_resnet152 as TEST_OCNet_Pyr_Res152
from .standard_fc_densenets import FCDenseNet103 as TEST_FCDensenet
from .psp_saeed import PSPNet as TEST_PSPNet2
from .tiramisu_test import FCDenseNet57 as TEST_Tiramisu57
from .Unet_nested import UNet_Nested_dilated as TEST_Unet_nested_dilated
from .unet_plus_plus import NestNet as Unet_Plus_Plus
from .unet_plus_plus import NestNet as Unet_Plus_Plus
22 changes: 0 additions & 22 deletions pywick/models/segmentation/testnets/bottleneck.py

This file was deleted.

63 changes: 0 additions & 63 deletions pywick/models/segmentation/testnets/dense_block.py

This file was deleted.
