Module vipy.data.stanford_dogs
Expand source code Browse git
import os
import vipy
from vipy.util import filebase
# Source tarballs for the Stanford Dogs dataset: ImageNet dog-breed images and
# per-image XML bounding-box annotations, hosted by the Stanford Vision Lab.
URLS = ['http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar',
'http://vision.stanford.edu/aditya86/ImageNetDogs/annotation.tar']
class StanfordDogs(vipy.dataset.Dataset):
    """Stanford Dogs detection dataset (dog-breed images with bounding boxes).

    On first use, downloads and unpacks the image and annotation tarballs in
    `URLS` into `datadir`, parses each per-image XML annotation for a single
    bounding box, and exposes the result as a lazy `vipy.dataset.Dataset`
    whose elements load as `vipy.image.ImageDetection` objects.

    Args:
        datadir [str]: cache directory for the downloaded tarballs and unpacked trees
        redownload [bool]: if True, re-download the tarballs even if already cached
    """
    def __init__(self, datadir=vipy.util.tocache('stanford_dogs'), redownload=False):
        self._datadir = vipy.util.remkdir(datadir)

        # Download and unpack each tarball unless its file is already cached
        for url in URLS:
            if redownload or not os.path.exists(os.path.join(self._datadir, vipy.util.filetail(url))):
                vipy.downloader.download_and_unpack(url, self._datadir)

        # Parse cached XML annotations into {image name: box dict}.
        # NOTE(review): string splitting keeps only the FIRST <object> element
        # in each XML file; images annotated with multiple dogs keep one box.
        d_imgname_to_annotation = {}
        for xmlfile in (os.path.join(dp, f) for dp, dn, filenames in os.walk(os.path.join(self._datadir, 'Annotation')) for f in filenames):
            with open(xmlfile, 'r') as f:
                data = f.read()
            d_imgname_to_annotation[vipy.util.filetail(xmlfile)] = {k: int(data.split('<%s>' % k, 1)[1].split('</%s>' % k, 1)[0])
                                                                    for k in ('xmin', 'ymin', 'xmax', 'ymax')}

        # Build (filename, xmin, ymin, xmax, ymax, category) tuples.  Images with
        # no matching annotation get a None box.  The breed category is the suffix
        # of the containing directory name (e.g. 'n02085620-Chihuahua' -> 'Chihuahua').
        imlist = []
        for imgfile in vipy.util.findimages(os.path.join(self._datadir, 'Images')):
            a = d_imgname_to_annotation.get(filebase(imgfile))  # single lookup instead of eight per image
            category = os.path.dirname(imgfile).rsplit('-', 1)[1]
            if a is not None:
                imlist.append((imgfile, a['xmin'], a['ymin'], a['xmax'], a['ymax'], category))
            else:
                imlist.append((imgfile, None, None, None, None, category))

        # Lazy loader: pixels are read only when an element is accessed
        loader = lambda x: vipy.image.ImageDetection(filename=x[0], xmin=x[1], ymin=x[2], xmax=x[3], ymax=x[4], category=x[5])
        super().__init__(imlist, id='stanford_dogs', loader=loader)
Classes
class StanfordDogs (datadir=vipy.util.tocache('stanford_dogs'), redownload=False)
-
vipy.dataset.Dataset() class
Common class to manipulate large sets of objects in parallel
Args
- dataset [list, tuple, set, obj]: a python built-in type that supports indexing or a generic object that supports indexing and has a length
- id [str]: an optional id of this dataset, which provides a descriptive name of the dataset
- loader [callable]: a callable loader that will construct the object from a raw data element in dataset. This is useful for custom deserialization or on-demand transformations. Datasets can be indexed, shuffled, iterated, minibatched, sorted, sampled, partitioned. Datasets constructed of vipy objects are lazily loaded, delaying loading pixels until they are needed
(trainset, valset, testset) = vipy.dataset.registry('mnist') (trainset, valset) = trainset.partition(0.9, 0.1) categories = trainset.set(lambda im: im.category()) smaller = testset.take(1024) preprocessed = smaller.map(lambda im: im.resize(32, 32).gain(1/256)) for b in preprocessed.minibatch(128): print(b) # visualize the dataset (trainset, valset, testset) = vipy.dataset.registry('pascal_voc_2007') for im in trainset: im.mindim(1024).show().print(sleep=1).close()
Datasets can be constructed from directories of json files or image files (
Dataset.from_directory()
) Datasets can be constructed from a single json file containing a list of objects (Dataset.from_json()
). Note: if a lambda function is provided as the loader then this dataset is not serializable. Use self.load() first, then serialize
Expand source code Browse git
class StanfordDogs(vipy.dataset.Dataset): def __init__(self, datadir=vipy.util.tocache('stanford_dogs'), redownload=False): self._datadir = vipy.util.remkdir(datadir) for url in URLS: if redownload or not os.path.exists(os.path.join(datadir, vipy.util.filetail(url))): vipy.downloader.download_and_unpack(url, self._datadir) # Read cached XML xmlfiles = [os.path.join(dp, f) for dp, dn, filenames in os.walk(os.path.join(self._datadir, 'Annotation')) for f in filenames] d_imgname_to_annotation = {} for xmlfile in xmlfiles: with open(xmlfile, 'r') as f: data = f.read() imgname = vipy.util.filetail(xmlfile) d_imgname_to_annotation[imgname] = {'xmin':int(data.split('<xmin>',1)[1].split('</xmin>',1)[0]), 'ymin':int(data.split('<ymin>',1)[1].split('</ymin>',1)[0]), 'xmax':int(data.split('<xmax>',1)[1].split('</xmax>',1)[0]), 'ymax':int(data.split('<ymax>',1)[1].split('</ymax>',1)[0])} # Read images imgfiles = vipy.util.findimages(os.path.join(self._datadir, 'Images')) imlist = [(f, d_imgname_to_annotation[filebase(f)]['xmin'] if filebase(f) in d_imgname_to_annotation else None, d_imgname_to_annotation[filebase(f)]['ymin'] if filebase(f) in d_imgname_to_annotation else None, d_imgname_to_annotation[filebase(f)]['xmax'] if filebase(f) in d_imgname_to_annotation else None, d_imgname_to_annotation[filebase(f)]['ymax'] if filebase(f) in d_imgname_to_annotation else None, os.path.dirname(f).rsplit('-',1)[1]) for f in imgfiles] loader = lambda x: vipy.image.ImageDetection(filename=x[0], xmin=x[1], ymin=x[2], xmax=x[3], ymax=x[4], category=x[5]) super().__init__(imlist, id='stanford_dogs', loader=loader)
Ancestors
Inherited members
Dataset
balanced
batch
chunk
chunks
clone
count
even_split
filter
frequency
from_directory
from_image_urls
groupby
id
identity_shuffler
index
inverse_frequency
list
load
localmap
map
minibatch
partition
pipeline
raw
repeat
sample
set
shift
shuffle
slice
sort
split
streaming_map
streaming_shuffler
take
take_fraction
takeby
takelist
takeone
truncate
tuple
uniform_shuffler
zip