Module vipy.data.coil100
import os
from vipy.util import remkdir, tocache, filebase
import vipy.downloader
from vipy.dataset import Dataset
from vipy.image import ImageCategory
URL = 'http://www.cs.columbia.edu/CAVE/databases/SLAM_coil-20_coil-100/coil-100/coil-100.zip'
SHA1 = '402d86b63cf3ace831f2af03bc9889e5e5c3dd1a'
class COIL100(Dataset):
    def __init__(self, datadir=None, redownload=False):
        datadir = tocache('coil100') if datadir is None else datadir

        # Download
        self._datadir = remkdir(datadir)
        if redownload or not os.path.exists(os.path.join(self._datadir, '.complete')):
            vipy.downloader.download_and_unpack(URL, self._datadir, sha1=SHA1)

        # Create dataset
        imlist = []
        imgdir = os.path.join(self._datadir, 'coil-100')
        for f in os.listdir(imgdir):
            if '__' in f:
                imlist.append(f)
        loader = lambda f, imgdir=imgdir: ImageCategory(filename=os.path.join(imgdir, f), category=f.split('__')[0], attributes={'orientation': filebase(f).split('__')[1]})
        super().__init__(imlist, id='coil100', loader=loader)
        open(os.path.join(self._datadir, '.complete'), 'a').close()
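A minimal usage sketch (not part of the module source above), assuming the default vipy cache location: constructing COIL100 downloads and unpacks the archive on first use, and indexing returns a lazily-loaded vipy.image.ImageCategory whose category and orientation are parsed from the filename by the loader.

from vipy.data.coil100 import COIL100

d = COIL100()                         # first call downloads and unpacks the archive
im = d[0]                             # datasets support indexing; pixels are not loaded yet
print(im.category())                  # e.g. 'obj1', parsed from a filename like 'obj1__0.png'
print(im.attributes['orientation'])   # pose angle string, e.g. '0', set by the loader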
Classes
class COIL100 (datadir=None, redownload=False)
vipy.dataset.Dataset() class
Common class to manipulate large sets of objects in parallel
Args
- dataset [list, tuple, set, obj]: a python built-in type that supports indexing or a generic object that supports indexing and has a length
- id [str]: an optional id of this dataset, which provides a descriptive name of the dataset
- loader [callable]: a callable loader that will construct the object from a raw data element in the dataset. This is useful for custom deserialization or on-demand transformations.

Datasets can be indexed, shuffled, iterated, minibatched, sorted, sampled, and partitioned. Datasets constructed of vipy objects are lazily loaded, delaying loading pixels until they are needed.
(trainset, valset, testset) = vipy.dataset.registry('mnist')
(trainset, valset) = trainset.partition(0.9, 0.1)
categories = trainset.set(lambda im: im.category())
smaller = testset.take(1024)
preprocessed = smaller.map(lambda im: im.resize(32, 32).gain(1/256))
for b in preprocessed.minibatch(128):
    print(b)

# visualize the dataset
(trainset, valset, testset) = vipy.dataset.registry('pascal_voc_2007')
for im in trainset:
    im.mindim(1024).show().print(sleep=1).close()
Datasets can be constructed from a directory of json files or image files (Dataset.from_directory()), or from a single json file containing a list of objects (Dataset.from_json()).

Note: if a lambda function is provided as the loader then this dataset is not serializable. Use self.load() then serialize.
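To make the serialization note concrete, here is a hedged sketch of a Dataset built with a module-level loader function instead of a lambda; named functions are picklable, so the resulting dataset can be serialized directly. The directory path and filename convention here are hypothetical, not part of vipy.

import os
from vipy.dataset import Dataset
from vipy.image import ImageCategory

IMGDIR = '/path/to/images'   # hypothetical directory of 'category__id.png' files

def loader(f):
    # Module-level function (picklable), unlike the lambda used by COIL100
    return ImageCategory(filename=os.path.join(IMGDIR, f), category=f.split('__')[0])

d = Dataset(sorted(os.listdir(IMGDIR)), id='mydataset', loader=loader)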
Ancestors
vipy.dataset.Dataset
Inherited members
Dataset:
balanced
batch
chunk
chunks
clone
count
even_split
filter
frequency
from_directory
from_image_urls
groupby
id
identity_shuffler
index
inverse_frequency
list
load
localmap
map
minibatch
partition
pipeline
raw
repeat
sample
set
shift
shuffle
slice
sort
split
streaming_map
streaming_shuffler
take
take_fraction
takeby
takelist
takeone
truncate
tuple
uniform_shuffler
zip
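A hedged sketch applying a few of these inherited members to COIL100; the method signatures follow the Dataset docstring example above, and the category name 'obj1' assumes the standard COIL-100 filename convention.

from vipy.data.coil100 import COIL100

d = COIL100()
obj1 = d.clone().filter(lambda im: im.category() == 'obj1')   # clone first, since filter may operate in place
sample = d.shuffle().take(16)                                 # random sample of 16 images
for b in sample.minibatch(8):                                 # iterate in minibatches of 8
    print(b)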