名称“_get_ade20k_pairs”未定义
name '_get_ade20k_pairs' is not defined
所以我正在尝试在语义分割中创建预处理数据集的功能。但它告诉我我的功能没有定义。而实际上在那里定义。我的代码是这样的
import os
import random
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image, ImageOps, ImageFilter
__all__ = ['ADE20KSegmentation']
class ADE20KSegmentation(data.Dataset):
    """ADE20K semantic segmentation dataset.

    Yields ``(image, mask)`` pairs with synchronized augmentations
    (random flip / scale / crop / blur for training, resize + center
    crop for validation), in the style of PSPNet training pipelines.

    Parameters
    ----------
    root : str
        Dataset root containing ``images/`` and ``annotations/`` subfolders.
    split : str
        Which split to index: ``'training'``, ``'validation'`` or ``'trainval'``.
    mode : str, optional
        Augmentation mode: ``'training'``, ``'validation'``, ``'testval'``
        or ``'test'``.  Defaults to ``split``.
    transform : callable, optional
        Extra transform applied to the image only (e.g. normalize + ToTensor).
    base_size : int
        Base length of the shorter edge used for random rescaling.
    crop_size : int
        Side length of the square crop fed to the network.
    """
    BASE_DIR = 'ADEChallengeData2016'
    NUM_CLASS = 150
    CLASSES = ("wall", "building, edifice", "sky", "floor, flooring", "tree",
               "ceiling", "road, route", "bed", "windowpane, window", "grass",
               "cabinet", "sidewalk, pavement",
               "person, individual, someone, somebody, mortal, soul",
               "earth, ground", "door, double door", "table", "mountain, mount",
               "plant, flora, plant life", "curtain, drape, drapery, mantle, pall",
               "chair", "car, auto, automobile, machine, motorcar",
               "water", "painting, picture", "sofa, couch, lounge", "shelf",
               "house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair",
               "seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press",
               "lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion",
               "base, pedestal, stand", "box", "column, pillar", "signboard, sign",
               "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink",
               "skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox",
               "grandstand, covered stand", "path", "stairs, steps", "runway",
               "case, display case, showcase, vitrine",
               "pool table, billiard table, snooker table", "pillow",
               "screen door, screen", "stairway, staircase", "river", "bridge, span",
               "bookcase", "blind, screen", "coffee table, cocktail table",
               "toilet, can, commode, crapper, pot, potty, stool, throne",
               "flower", "book", "hill", "bench", "countertop",
               "stove, kitchen stove, range, kitchen range, cooking stove",
               "palm, palm tree", "kitchen island",
               "computer, computing machine, computing device, data processor, "
               "electronic computer, information processing system",
               "swivel chair", "boat", "bar", "arcade machine",
               "hovel, hut, hutch, shack, shanty",
               "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, "
               "motorcoach, omnibus, passenger vehicle",
               "towel", "light, light source", "truck, motortruck", "tower",
               "chandelier, pendant, pendent", "awning, sunshade, sunblind",
               "streetlight, street lamp", "booth, cubicle, stall, kiosk",
               "television receiver, television, television set, tv, tv set, idiot "
               "box, boob tube, telly, goggle box",
               "airplane, aeroplane, plane", "dirt track",
               "apparel, wearing apparel, dress, clothes",
               "pole", "land, ground, soil",
               "bannister, banister, balustrade, balusters, handrail",
               "escalator, moving staircase, moving stairway",
               "ottoman, pouf, pouffe, puff, hassock",
               "bottle", "buffet, counter, sideboard",
               "poster, posting, placard, notice, bill, card",
               "stage", "van", "ship", "fountain",
               "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
               "canopy", "washer, automatic washer, washing machine",
               "plaything, toy", "swimming pool, swimming bath, natatorium",
               "stool", "barrel, cask", "basket, handbasket", "waterfall, falls",
               "tent, collapsible shelter", "bag", "minibike, motorbike", "cradle",
               "oven", "ball", "food, solid food", "step, stair", "tank, storage tank",
               "trade name, brand name, brand, marque", "microwave, microwave oven",
               "pot, flowerpot", "animal, animate being, beast, brute, creature, fauna",
               "bicycle, bike, wheel, cycle", "lake",
               "dishwasher, dish washer, dishwashing machine",
               "screen, silver screen, projection screen",
               "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase",
               "traffic light, traffic signal, stoplight", "tray",
               "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, "
               "dustbin, trash barrel, trash bin",
               "fan", "pier, wharf, wharfage, dock", "crt screen",
               "plate", "monitor, monitoring device", "bulletin board, notice board",
               "shower", "radiator", "glass, drinking glass", "clock", "flag")

    def __init__(self, root='/content/dataset', split='training', mode=None, transform=None,
                 base_size=520, crop_size=480, **kwargs):
        super(ADE20KSegmentation, self).__init__()
        self.root = root
        self.split = split
        # Augmentation mode defaults to the split name.
        self.mode = mode if mode is not None else split
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size
        self.images, self.mask_paths = _get_ade20k_pairs(self.root, self.split)
        assert (len(self.images) == len(self.mask_paths))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + self.root + "\n")
        # NOTE(review): the 19 valid classes and the 35-entry lookup table
        # below look like they were copied from a Cityscapes-style loader;
        # ADE20K has 150 classes.  Verify before training on ADE20K labels.
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
                              23, 24, 25, 26, 27, 28, 31, 32, 33]
        # _key[raw_id + 1] gives the train id (-1 = ignore).
        self._key = np.array([-1, -1, -1, -1, -1, -1,
                              -1, -1, 0, 1, -1, -1,
                              2, 3, 4, -1, -1, -1,
                              5, -1, 6, 7, 8, 9,
                              10, 11, 12, 13, 14, 15,
                              -1, -1, 16, 17, 18])
        # Raw label ids accepted by _class_to_index: -1 .. len(_key) - 2.
        self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')

    def _class_to_index(self, mask):
        """Translate raw label ids in ``mask`` to train ids via ``self._key``."""
        values = np.unique(mask)
        for value in values:
            # Every raw id present in the mask must be a known id.
            assert (value in self._mapping)
        # digitize(..., right=True) maps raw id v to index v + 1 in _mapping,
        # which is then used to look up the train id in _key.
        index = np.digitize(mask.ravel(), self._mapping, right=True)
        return self._key[index].reshape(mask.shape)

    def __getitem__(self, index):
        """Return ``(image, mask)`` (or ``(image, filename)`` in test mode)."""
        # BUG FIX: the original called PIL.Image.open, but only
        # ``from PIL import Image`` is in scope, so ``PIL`` is undefined.
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.mask_paths[index])
        # synchronized transform (image and mask transformed together)
        if self.mode == 'training':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'validation':  # BUG FIX: was misspelled 'valdation'
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor (image only)
        if self.transform is not None:
            img = self.transform(img)
        return img, mask

    def _val_sync_transform(self, img, mask):
        """Deterministic validation transform: resize short edge, center crop."""
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        # Resize so the SHORTER edge equals short_size, keeping aspect ratio.
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        # NEAREST keeps mask values as valid label ids.
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask

    def _sync_transform(self, img, mask):
        """Random training augmentation: flip, scale, pad, crop, blur."""
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge) in [0.5, 2.0] x base_size
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad (bottom/right) if the rescaled image is smaller than the crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask

    def _img_transform(self, img):
        """Convert a PIL image to a numpy array (H, W, C)."""
        return np.array(img)

    def _mask_transform(self, mask):
        """Convert a PIL mask to a LongTensor of train ids."""
        target = self._class_to_index(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def __len__(self):
        return len(self.images)

    @property
    def classes(self):
        """Category names."""
        return type(self).CLASSES

    @property
    def pred_offset(self):
        # Predictions are shifted by 1 (label 0 is background in ADE20K).
        return 1
def _get_ade20k_pairs(folder, mode='training'):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith(".jpg"):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('images', 'annotations')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths
if split in ('training', 'validation'):
img_folder = os.path.join(folder, 'images/' + split)
mask_folder = os.path.join(folder, 'annotations/' + split)
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
return img_paths, mask_paths
else:
assert split == 'trainval'
print('trainval set')
train_img_folder = os.path.join(folder, 'images/training')
train_mask_folder = os.path.join(folder, 'annotations/training')
val_img_folder = os.path.join(folder, 'images/validation')
val_mask_folder = os.path.join(folder, 'annotations/validation')
train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder)
val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
return img_paths, mask_paths
if __name__ == '__main__':
    # Smoke test: build the dataset with default arguments and fetch the
    # first (image, label) sample.  Requires ADE20K data to be present
    # under the default root ('/content/dataset').
    dataset = ADE20KSegmentation()
    img, label = dataset[0]
当我运行它时，总是显示 NameError: name '_get_ade20k_pairs' is not defined。我的定义有什么问题？为什么有时它可以运行，而有时却不能？
我想您是从 here 复制的代码，但没有正确复制 _get_ade20k_pairs：它的缩进层级抄错了。您需要把它缩进 0 个制表符——也就是说，该函数必须顶格定义在模块的顶层，而不能嵌套在类的内部；否则它只是类的一个属性，在模块级别引用时就会报 NameError。
所以我正在尝试在语义分割中创建预处理数据集的功能。但它告诉我我的功能没有定义。而实际上在那里定义。我的代码是这样的
import os
import random
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image, ImageOps, ImageFilter
__all__ = ['ADE20KSegmentation']
class ADE20KSegmentation(data.Dataset):
    """ADE20K semantic segmentation dataset.

    Yields ``(image, mask)`` pairs with synchronized augmentations
    (random flip / scale / crop / blur for training, resize + center
    crop for validation), in the style of PSPNet training pipelines.

    Parameters
    ----------
    root : str
        Dataset root containing ``images/`` and ``annotations/`` subfolders.
    split : str
        Which split to index: ``'training'``, ``'validation'`` or ``'trainval'``.
    mode : str, optional
        Augmentation mode: ``'training'``, ``'validation'``, ``'testval'``
        or ``'test'``.  Defaults to ``split``.
    transform : callable, optional
        Extra transform applied to the image only (e.g. normalize + ToTensor).
    base_size : int
        Base length of the shorter edge used for random rescaling.
    crop_size : int
        Side length of the square crop fed to the network.
    """
    BASE_DIR = 'ADEChallengeData2016'
    NUM_CLASS = 150
    CLASSES = ("wall", "building, edifice", "sky", "floor, flooring", "tree",
               "ceiling", "road, route", "bed", "windowpane, window", "grass",
               "cabinet", "sidewalk, pavement",
               "person, individual, someone, somebody, mortal, soul",
               "earth, ground", "door, double door", "table", "mountain, mount",
               "plant, flora, plant life", "curtain, drape, drapery, mantle, pall",
               "chair", "car, auto, automobile, machine, motorcar",
               "water", "painting, picture", "sofa, couch, lounge", "shelf",
               "house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair",
               "seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press",
               "lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion",
               "base, pedestal, stand", "box", "column, pillar", "signboard, sign",
               "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink",
               "skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox",
               "grandstand, covered stand", "path", "stairs, steps", "runway",
               "case, display case, showcase, vitrine",
               "pool table, billiard table, snooker table", "pillow",
               "screen door, screen", "stairway, staircase", "river", "bridge, span",
               "bookcase", "blind, screen", "coffee table, cocktail table",
               "toilet, can, commode, crapper, pot, potty, stool, throne",
               "flower", "book", "hill", "bench", "countertop",
               "stove, kitchen stove, range, kitchen range, cooking stove",
               "palm, palm tree", "kitchen island",
               "computer, computing machine, computing device, data processor, "
               "electronic computer, information processing system",
               "swivel chair", "boat", "bar", "arcade machine",
               "hovel, hut, hutch, shack, shanty",
               "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, "
               "motorcoach, omnibus, passenger vehicle",
               "towel", "light, light source", "truck, motortruck", "tower",
               "chandelier, pendant, pendent", "awning, sunshade, sunblind",
               "streetlight, street lamp", "booth, cubicle, stall, kiosk",
               "television receiver, television, television set, tv, tv set, idiot "
               "box, boob tube, telly, goggle box",
               "airplane, aeroplane, plane", "dirt track",
               "apparel, wearing apparel, dress, clothes",
               "pole", "land, ground, soil",
               "bannister, banister, balustrade, balusters, handrail",
               "escalator, moving staircase, moving stairway",
               "ottoman, pouf, pouffe, puff, hassock",
               "bottle", "buffet, counter, sideboard",
               "poster, posting, placard, notice, bill, card",
               "stage", "van", "ship", "fountain",
               "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
               "canopy", "washer, automatic washer, washing machine",
               "plaything, toy", "swimming pool, swimming bath, natatorium",
               "stool", "barrel, cask", "basket, handbasket", "waterfall, falls",
               "tent, collapsible shelter", "bag", "minibike, motorbike", "cradle",
               "oven", "ball", "food, solid food", "step, stair", "tank, storage tank",
               "trade name, brand name, brand, marque", "microwave, microwave oven",
               "pot, flowerpot", "animal, animate being, beast, brute, creature, fauna",
               "bicycle, bike, wheel, cycle", "lake",
               "dishwasher, dish washer, dishwashing machine",
               "screen, silver screen, projection screen",
               "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase",
               "traffic light, traffic signal, stoplight", "tray",
               "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, "
               "dustbin, trash barrel, trash bin",
               "fan", "pier, wharf, wharfage, dock", "crt screen",
               "plate", "monitor, monitoring device", "bulletin board, notice board",
               "shower", "radiator", "glass, drinking glass", "clock", "flag")

    def __init__(self, root='/content/dataset', split='training', mode=None, transform=None,
                 base_size=520, crop_size=480, **kwargs):
        super(ADE20KSegmentation, self).__init__()
        self.root = root
        self.split = split
        # Augmentation mode defaults to the split name.
        self.mode = mode if mode is not None else split
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size
        self.images, self.mask_paths = _get_ade20k_pairs(self.root, self.split)
        assert (len(self.images) == len(self.mask_paths))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: " + self.root + "\n")
        # NOTE(review): the 19 valid classes and the 35-entry lookup table
        # below look like they were copied from a Cityscapes-style loader;
        # ADE20K has 150 classes.  Verify before training on ADE20K labels.
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
                              23, 24, 25, 26, 27, 28, 31, 32, 33]
        # _key[raw_id + 1] gives the train id (-1 = ignore).
        self._key = np.array([-1, -1, -1, -1, -1, -1,
                              -1, -1, 0, 1, -1, -1,
                              2, 3, 4, -1, -1, -1,
                              5, -1, 6, 7, 8, 9,
                              10, 11, 12, 13, 14, 15,
                              -1, -1, 16, 17, 18])
        # Raw label ids accepted by _class_to_index: -1 .. len(_key) - 2.
        self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')

    def _class_to_index(self, mask):
        """Translate raw label ids in ``mask`` to train ids via ``self._key``."""
        values = np.unique(mask)
        for value in values:
            # Every raw id present in the mask must be a known id.
            assert (value in self._mapping)
        # digitize(..., right=True) maps raw id v to index v + 1 in _mapping,
        # which is then used to look up the train id in _key.
        index = np.digitize(mask.ravel(), self._mapping, right=True)
        return self._key[index].reshape(mask.shape)

    def __getitem__(self, index):
        """Return ``(image, mask)`` (or ``(image, filename)`` in test mode)."""
        # BUG FIX: the original called PIL.Image.open, but only
        # ``from PIL import Image`` is in scope, so ``PIL`` is undefined.
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.mask_paths[index])
        # synchronized transform (image and mask transformed together)
        if self.mode == 'training':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'validation':  # BUG FIX: was misspelled 'valdation'
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor (image only)
        if self.transform is not None:
            img = self.transform(img)
        return img, mask

    def _val_sync_transform(self, img, mask):
        """Deterministic validation transform: resize short edge, center crop."""
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        # Resize so the SHORTER edge equals short_size, keeping aspect ratio.
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        # NEAREST keeps mask values as valid label ids.
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask

    def _sync_transform(self, img, mask):
        """Random training augmentation: flip, scale, pad, crop, blur."""
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge) in [0.5, 2.0] x base_size
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad (bottom/right) if the rescaled image is smaller than the crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask

    def _img_transform(self, img):
        """Convert a PIL image to a numpy array (H, W, C)."""
        return np.array(img)

    def _mask_transform(self, mask):
        """Convert a PIL mask to a LongTensor of train ids."""
        target = self._class_to_index(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def __len__(self):
        return len(self.images)

    @property
    def classes(self):
        """Category names."""
        return type(self).CLASSES

    @property
    def pred_offset(self):
        # Predictions are shifted by 1 (label 0 is background in ADE20K).
        return 1
def _get_ade20k_pairs(folder, mode='training'):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith(".jpg"):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('images', 'annotations')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths
if split in ('training', 'validation'):
img_folder = os.path.join(folder, 'images/' + split)
mask_folder = os.path.join(folder, 'annotations/' + split)
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
return img_paths, mask_paths
else:
assert split == 'trainval'
print('trainval set')
train_img_folder = os.path.join(folder, 'images/training')
train_mask_folder = os.path.join(folder, 'annotations/training')
val_img_folder = os.path.join(folder, 'images/validation')
val_mask_folder = os.path.join(folder, 'annotations/validation')
train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder)
val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
return img_paths, mask_paths
if __name__ == '__main__':
    # Smoke test: build the dataset with default arguments and fetch the
    # first (image, label) sample.  Requires ADE20K data to be present
    # under the default root ('/content/dataset').
    dataset = ADE20KSegmentation()
    img, label = dataset[0]
当我运行它时，总是显示 NameError: name '_get_ade20k_pairs' is not defined。我的定义有什么问题？为什么有时它可以运行，而有时却不能？
我想您是从 here 复制的代码，但没有正确复制 _get_ade20k_pairs：它的缩进层级抄错了。您需要把它缩进 0 个制表符——也就是说，该函数必须顶格定义在模块的顶层，而不能嵌套在类的内部；否则它只是类的一个属性，在模块级别引用时就会报 NameError。