From d604880fe56542fb512dd7469399e92274002a3e Mon Sep 17 00:00:00 2001
From: eugenos-programos
Date: Mon, 11 Sep 2023 13:05:45 +0300
Subject: [PATCH 1/4] chore: remove yolor inference code

---
 model_image/YOLORObjectDetectionModel.py |   37 -
 model_image/yolor/__init__.py            |    1 -
 model_image/yolor/model.py               |   12 -
 model_image/yolor/models/__init__.py     |    1 -
 model_image/yolor/models/export.py       |   68 --
 model_image/yolor/models/models.py       |  761 ------------
 model_image/yolor/utils/__init__.py      |    1 -
 model_image/yolor/utils/activations.py   |   72 --
 model_image/yolor/utils/autoanchor.py    |  152 ---
 model_image/yolor/utils/datasets.py      | 1399 ----------------------
 model_image/yolor/utils/general.py       |  480 --------
 model_image/yolor/utils/google_utils.py  |  120 --
 model_image/yolor/utils/layers.py        |  547 ---------
 model_image/yolor/utils/loss.py          |  186 ---
 model_image/yolor/utils/metrics.py       |  146 ---
 model_image/yolor/utils/parse_config.py  |   74 --
 model_image/yolor/utils/plots.py         |  404 -------
 model_image/yolor/utils/torch_utils.py   |  240 ----
 18 files changed, 4701 deletions(-)
 delete mode 100644 model_image/YOLORObjectDetectionModel.py
 delete mode 100644 model_image/yolor/__init__.py
 delete mode 100644 model_image/yolor/model.py
 delete mode 100644 model_image/yolor/models/__init__.py
 delete mode 100644 model_image/yolor/models/export.py
 delete mode 100644 model_image/yolor/models/models.py
 delete mode 100644 model_image/yolor/utils/__init__.py
 delete mode 100644 model_image/yolor/utils/activations.py
 delete mode 100644 model_image/yolor/utils/autoanchor.py
 delete mode 100644 model_image/yolor/utils/datasets.py
 delete mode 100644 model_image/yolor/utils/general.py
 delete mode 100644 model_image/yolor/utils/google_utils.py
 delete mode 100644 model_image/yolor/utils/layers.py
 delete mode 100644 model_image/yolor/utils/loss.py
 delete mode 100644 model_image/yolor/utils/metrics.py
 delete mode 100644 model_image/yolor/utils/parse_config.py
 delete mode 100644 model_image/yolor/utils/plots.py
 delete mode 100644 model_image/yolor/utils/torch_utils.py

diff --git a/model_image/YOLORObjectDetectionModel.py b/model_image/YOLORObjectDetectionModel.py
deleted file mode 100644
index 5998d1d..0000000
--- a/model_image/YOLORObjectDetectionModel.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import torch
-from yolor.model import get_model
-import numpy as np
-from yolor.utils.datasets import letterbox
-from yolor.utils.general import non_max_suppression, scale_coords
-import time
-
-
-class YOLORObjectDetectionModel:
-    def __init__(self, model_path: str, config_path: str, conf_thresh, iou_thresh, classes, **kwargs) -> None:
-        self.model, self.device = get_model(model_path, config_path)
-        self.conf_thresh = conf_thresh
-        self.iou_thresh = iou_thresh
-        self.classes = classes
-        self.logger = kwargs.get("logger")
-
-    def __preprocess_image__(self, img: np.array) -> np.array:
-        self.img_shape = img.shape
-        img = letterbox(img.copy(), new_shape=1280, auto_size=64)[0]
-        img = img[:, :, ::-1].transpose(2, 0, 1)
-        img = np.ascontiguousarray(img)
-        img = torch.from_numpy(img).to(self.device)
-        img = img.float()
-        img /= 255.0
-        img = img.unsqueeze(0)
-        return img
-
-    @torch.no_grad()
-    def __call__(self, img: np.array) -> list:
-        img = self.__preprocess_image__(img)
-        t1 = time.time()
-        pred = self.model(img, augment=False)[0]
-        t2 = time.time()
-        self.logger.info(f"Time used for prediction - {t2 - t1} s")
-        pred = non_max_suppression(pred, self.conf_thresh, self.iou_thresh, classes=self.classes, agnostic=False)[0]
-        pred[:, :4] = scale_coords(img.shape[2:], pred[:, :4], self.img_shape).round()
-        return pred[:, :5]
diff --git a/model_image/yolor/__init__.py b/model_image/yolor/__init__.py
deleted file mode 100644
index 04d37b3..0000000
--- a/model_image/yolor/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .model import get_model
\ No newline at end of file
diff --git a/model_image/yolor/model.py b/model_image/yolor/model.py
deleted file mode 100644
index bd49e64..0000000
--- a/model_image/yolor/model.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import torch
-from .utils.torch_utils import select_device
-from .models.models import Darknet
-
-
-def get_model(weights, cfg):
-    imgsz = 1280
-    device = select_device('cpu')
-    model = Darknet(cfg, imgsz)
-    model.load_state_dict(torch.load(weights, map_location=device)['model'])
-    model.to(device).eval()
-    return model, device
diff --git a/model_image/yolor/models/__init__.py b/model_image/yolor/models/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/model_image/yolor/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/model_image/yolor/models/export.py b/model_image/yolor/models/export.py
deleted file mode 100644
index f96920b..0000000
--- a/model_image/yolor/models/export.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import argparse
-
-import torch
-
-from yolor.utils.google_utils import attempt_download
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default='./yolov4.pt', help='weights path')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')
-    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
-    opt = parser.parse_args()
-    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
-    print(opt)
-
-    # Input
-    img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection
-
-    # Load PyTorch model
-    attempt_download(opt.weights)
-    model = torch.load(opt.weights, map_location=torch.device('cpu'))['model'].float()
-    model.eval()
-    model.model[-1].export = True  # set Detect() layer export=True
-    y = model(img)  # dry run
-
-    # TorchScript export
-    try:
-        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
-        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
-        ts = torch.jit.trace(model, img)
-        ts.save(f)
-        print('TorchScript export success, saved as %s' % f)
-    except Exception as e:
-        print('TorchScript export failure: %s' % e)
-
-    # ONNX export
-    try:
-        import onnx
-
-        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
-        f = opt.weights.replace('.pt', '.onnx')  # filename
-        model.fuse()  # only for ONNX
-        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
-                          output_names=['classes', 'boxes'] if y is None else ['output'])
-
-        # Checks
-        onnx_model = onnx.load(f)  # load onnx model
-        onnx.checker.check_model(onnx_model)  # check onnx model
-        print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
-        print('ONNX export success, saved as %s' % f)
-    except Exception as e:
-        print('ONNX export failure: %s' % e)
-
-    # CoreML export
-    try:
-        import coremltools as ct
-
-        print('\nStarting CoreML export with coremltools %s...'
% ct.__version__) - # convert model from torchscript and apply pixel scaling as per detect.py - model = ct.convert(ts, inputs=[ct.ImageType(name='images', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print('CoreML export success, saved as %s' % f) - except Exception as e: - print('CoreML export failure: %s' % e) - - # Finish - print('\nExport complete. Visualize with https://github.com/lutzroeder/netron.') diff --git a/model_image/yolor/models/models.py b/model_image/yolor/models/models.py deleted file mode 100644 index cde2435..0000000 --- a/model_image/yolor/models/models.py +++ /dev/null @@ -1,761 +0,0 @@ -from yolor.utils.google_utils import * -from yolor.utils.layers import * -from yolor.utils.parse_config import * -from yolor.utils import torch_utils - -ONNX_EXPORT = False - - -def create_modules(module_defs, img_size, cfg): - # Constructs module list of layer blocks from module configuration in module_defs - - img_size = [img_size] * 2 if isinstance(img_size, int) else img_size # expand if necessary - _ = module_defs.pop(0) # cfg training hyperparams (unused) - output_filters = [3] # input channels - module_list = nn.ModuleList() - routs = [] # list of layers which rout to deeper layers - yolo_index = -1 - - for i, mdef in enumerate(module_defs): - modules = nn.Sequential() - - if mdef['type'] == 'convolutional': - bn = mdef['batch_normalize'] - filters = mdef['filters'] - k = mdef['size'] # kernel size - stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x']) - if isinstance(k, int): # single-size conv - modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1], - out_channels=filters, - kernel_size=k, - stride=stride, - padding=k // 2 if mdef['pad'] else 0, - groups=mdef['groups'] if 'groups' in mdef else 1, - bias=not bn)) - else: # multiple-size conv - modules.add_module('MixConv2d', MixConv2d(in_ch=output_filters[-1], - out_ch=filters, - k=k, - stride=stride, - bias=not bn)) - - if bn: - modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)) - else: - routs.append(i) # detection output (goes into yolo layer) - - if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441 - modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True)) - elif mdef['activation'] == 'swish': - modules.add_module('activation', Swish()) - elif mdef['activation'] == 'mish': - modules.add_module('activation', Mish()) - elif mdef['activation'] == 'emb': - modules.add_module('activation', F.normalize()) - elif mdef['activation'] == 'logistic': - modules.add_module('activation', nn.Sigmoid()) - elif mdef['activation'] == 'silu': - modules.add_module('activation', nn.SiLU()) - - elif mdef['type'] == 'deformableconvolutional': - bn = mdef['batch_normalize'] - filters = mdef['filters'] - k = mdef['size'] # kernel size - stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x']) - if isinstance(k, int): # single-size conv - modules.add_module('DeformConv2d', DeformConv2d(output_filters[-1], - filters, - kernel_size=k, - padding=k // 2 if mdef['pad'] else 0, - stride=stride, - bias=not bn, - modulation=True)) - else: # multiple-size conv - modules.add_module('MixConv2d', MixConv2d(in_ch=output_filters[-1], - out_ch=filters, - k=k, - stride=stride, - bias=not bn)) - - if bn: - modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)) - else: - routs.append(i) # 
detection output (goes into yolo layer) - - if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441 - modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True)) - elif mdef['activation'] == 'swish': - modules.add_module('activation', Swish()) - elif mdef['activation'] == 'mish': - modules.add_module('activation', Mish()) - elif mdef['activation'] == 'silu': - modules.add_module('activation', nn.SiLU()) - - elif mdef['type'] == 'dropout': - p = mdef['probability'] - modules = nn.Dropout(p) - - elif mdef['type'] == 'avgpool': - modules = GAP() - - elif mdef['type'] == 'silence': - filters = output_filters[-1] - modules = Silence() - - elif mdef['type'] == 'scale_channels': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ScaleChannel(layers=layers) - - elif mdef['type'] == 'shift_channels': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ShiftChannel(layers=layers) - - elif mdef['type'] == 'shift_channels_2d': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ShiftChannel2D(layers=layers) - - elif mdef['type'] == 'control_channels': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ControlChannel(layers=layers) - - elif mdef['type'] == 'control_channels_2d': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ControlChannel2D(layers=layers) - - elif mdef['type'] == 'alternate_channels': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] * 2 - routs.extend([i + l if l < 0 else l for l in layers]) - modules = AlternateChannel(layers=layers) - - elif mdef['type'] == 'alternate_channels_2d': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] * 2 - routs.extend([i + l if l < 0 else l for l in layers]) - modules = AlternateChannel2D(layers=layers) - - elif mdef['type'] == 'select_channels': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = SelectChannel(layers=layers) - - elif mdef['type'] == 'select_channels_2d': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = SelectChannel2D(layers=layers) - - elif mdef['type'] == 'sam': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = ScaleSpatial(layers=layers) - - elif mdef['type'] == 'BatchNorm2d': - filters = output_filters[-1] - modules = nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4) - if i == 0 and filters == 3: # normalize RGB image - # imagenet mean and var https://pytorch.org/docs/stable/torchvision/models.html#classification - modules.running_mean = torch.tensor([0.485, 0.456, 0.406]) - modules.running_var = 
torch.tensor([0.0524, 0.0502, 0.0506]) - - elif mdef['type'] == 'maxpool': - k = mdef['size'] # kernel size - stride = mdef['stride'] - maxpool = nn.MaxPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2) - if k == 2 and stride == 1: # yolov3-tiny - modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1))) - modules.add_module('MaxPool2d', maxpool) - else: - modules = maxpool - - elif mdef['type'] == 'local_avgpool': - k = mdef['size'] # kernel size - stride = mdef['stride'] - avgpool = nn.AvgPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2) - if k == 2 and stride == 1: # yolov3-tiny - modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1))) - modules.add_module('AvgPool2d', avgpool) - else: - modules = avgpool - - elif mdef['type'] == 'upsample': - if ONNX_EXPORT: # explicitly state size, avoid scale_factor - g = (yolo_index + 1) * 2 / 32 # gain - modules = nn.Upsample(size=tuple(int(x * g) for x in img_size)) # img_size = (320, 192) - else: - modules = nn.Upsample(scale_factor=mdef['stride']) - - elif mdef['type'] == 'route': # nn.Sequential() placeholder for 'route' layer - layers = mdef['layers'] - filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers]) - routs.extend([i + l if l < 0 else l for l in layers]) - modules = FeatureConcat(layers=layers) - - elif mdef['type'] == 'route2': # nn.Sequential() placeholder for 'route' layer - layers = mdef['layers'] - filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers]) - routs.extend([i + l if l < 0 else l for l in layers]) - modules = FeatureConcat2(layers=layers) - - elif mdef['type'] == 'route3': # nn.Sequential() placeholder for 'route' layer - layers = mdef['layers'] - filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers]) - routs.extend([i + l if l < 0 else l for l in layers]) - modules = FeatureConcat3(layers=layers) - - elif mdef['type'] == 'route_lhalf': # nn.Sequential() placeholder for 'route' layer - layers = mdef['layers'] - filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])//2 - routs.extend([i + l if l < 0 else l for l in layers]) - modules = FeatureConcat_l(layers=layers) - - elif mdef['type'] == 'shortcut': # nn.Sequential() placeholder for 'shortcut' layer - layers = mdef['from'] - filters = output_filters[-1] - routs.extend([i + l if l < 0 else l for l in layers]) - modules = WeightedFeatureFusion(layers=layers, weight='weights_type' in mdef) - - elif mdef['type'] == 'reorg3d': # yolov3-spp-pan-scale - pass - - elif mdef['type'] == 'reorg': # yolov3-spp-pan-scale - filters = 4 * output_filters[-1] - modules.add_module('Reorg', Reorg()) - - elif mdef['type'] == 'dwt': # yolov3-spp-pan-scale - filters = 4 * output_filters[-1] - modules.add_module('DWT', DWT()) - - elif mdef['type'] == 'implicit_add': # yolov3-spp-pan-scale - filters = mdef['filters'] - modules = ImplicitA(channel=filters) - - elif mdef['type'] == 'implicit_mul': # yolov3-spp-pan-scale - filters = mdef['filters'] - modules = ImplicitM(channel=filters) - - elif mdef['type'] == 'implicit_cat': # yolov3-spp-pan-scale - filters = mdef['filters'] - modules = ImplicitC(channel=filters) - - elif mdef['type'] == 'implicit_add_2d': # yolov3-spp-pan-scale - channels = mdef['filters'] - filters = mdef['atoms'] - modules = Implicit2DA(atom=filters, channel=channels) - - elif mdef['type'] == 'implicit_mul_2d': # yolov3-spp-pan-scale - channels = mdef['filters'] - filters = mdef['atoms'] - modules = Implicit2DM(atom=filters, channel=channels) - - elif mdef['type'] == 'implicit_cat_2d': # 
yolov3-spp-pan-scale - channels = mdef['filters'] - filters = mdef['atoms'] - modules = Implicit2DC(atom=filters, channel=channels) - - elif mdef['type'] == 'yolo': - yolo_index += 1 - stride = [8, 16, 32, 64, 128] # P3, P4, P5, P6, P7 strides - if any(x in cfg for x in ['yolov4-tiny', 'fpn', 'yolov3']): # P5, P4, P3 strides - stride = [32, 16, 8] - layers = mdef['from'] if 'from' in mdef else [] - modules = YOLOLayer(anchors=mdef['anchors'][mdef['mask']], # anchor list - nc=mdef['classes'], # number of classes - img_size=img_size, # (416, 416) - yolo_index=yolo_index, # 0, 1, 2... - layers=layers, # output layers - stride=stride[yolo_index]) - - # Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3) - try: - j = layers[yolo_index] if 'from' in mdef else -2 - bias_ = module_list[j][0].bias # shape(255,) - bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85) - #bias[:, 4] += -4.5 # obj - bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image) - bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc) - module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad) - - #j = [-2, -5, -8] - #for sj in j: - # bias_ = module_list[sj][0].bias - # bias = bias_[:modules.no * 1].view(1, -1) - # bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) - # bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) - # module_list[sj][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad) - except: - print('WARNING: smart bias initialization failure.') - - elif mdef['type'] == 'jde': - yolo_index += 1 - stride = [8, 16, 32, 64, 128] # P3, P4, P5, P6, P7 strides - if any(x in cfg for x in ['yolov4-tiny', 'fpn', 'yolov3']): # P5, P4, P3 strides - stride = [32, 16, 8] - layers = mdef['from'] if 'from' in mdef else [] - modules = JDELayer(anchors=mdef['anchors'][mdef['mask']], # anchor list - nc=mdef['classes'], # number of classes - img_size=img_size, # (416, 416) - yolo_index=yolo_index, # 0, 1, 2... 
- layers=layers, # output layers - stride=stride[yolo_index]) - - # Initialize preceding Conv2d() bias (https://arxiv.org/pdf/1708.02002.pdf section 3.3) - try: - j = layers[yolo_index] if 'from' in mdef else -1 - bias_ = module_list[j][0].bias # shape(255,) - bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85) - #bias[:, 4] += -4.5 # obj - bias.data[:, 4] += math.log(8 / (640 / stride[yolo_index]) ** 2) # obj (8 objects per 640 image) - bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc) - module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad) - except: - print('WARNING: smart bias initialization failure.') - - else: - print('Warning: Unrecognized Layer Type: ' + mdef['type']) - - # Register module list and number of output filters - module_list.append(modules) - output_filters.append(filters) - - routs_binary = [False] * (i + 1) - for i in routs: - routs_binary[i] = True - return module_list, routs_binary - - -class YOLOLayer(nn.Module): - def __init__(self, anchors, nc, img_size, yolo_index, layers, stride): - super(YOLOLayer, self).__init__() - self.anchors = torch.Tensor(anchors) - self.index = yolo_index # index of this layer in layers - self.layers = layers # model output layer indices - self.stride = stride # layer stride - self.nl = len(layers) # number of output layers (3) - self.na = len(anchors) # number of anchors (3) - self.nc = nc # number of classes (80) - self.no = nc + 5 # number of outputs (85) - self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints - self.anchor_vec = self.anchors / self.stride - self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2) - - if ONNX_EXPORT: - self.training = False - self.create_grids((img_size[1] // stride, img_size[0] // stride)) # number x, y grid points - - def create_grids(self, ng=(13, 13), device='cpu'): - self.nx, self.ny = ng # x and y grid size - self.ng = torch.tensor(ng, dtype=torch.float) - - # build xy offsets - if not self.training: - yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)]) - self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float() - - if self.anchor_vec.device != device: - self.anchor_vec = self.anchor_vec.to(device) - self.anchor_wh = self.anchor_wh.to(device) - - def forward(self, p, out): - ASFF = False # https://arxiv.org/abs/1911.09516 - if ASFF: - i, n = self.index, self.nl # index in layers, number of layers - p = out[self.layers[i]] - bs, _, ny, nx = p.shape # bs, 255, 13, 13 - if (self.nx, self.ny) != (nx, ny): - self.create_grids((nx, ny), p.device) - - # outputs and weights - # w = F.softmax(p[:, -n:], 1) # normalized weights - w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster) - # w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension - - # weighted ASFF sum - p = out[self.layers[i]][:, :-n] * w[:, i:i + 1] - for j in range(n): - if j != i: - p += w[:, j:j + 1] * \ - F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False) - - elif ONNX_EXPORT: - bs = 1 # batch size - else: - bs, _, ny, nx = p.shape # bs, 255, 13, 13 - if (self.nx, self.ny) != (nx, ny): - self.create_grids((nx, ny), p.device) - - # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh) - p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction - - if self.training: - return p - - elif ONNX_EXPORT: - # Avoid 
broadcasting for ANE operations - m = self.na * self.nx * self.ny - ng = 1. / self.ng.repeat(m, 1) - grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2) - anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng - - p = p.view(m, self.no) - xy = torch.sigmoid(p[:, 0:2]) + grid # x, y - wh = torch.exp(p[:, 2:4]) * anchor_wh # width, height - p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else \ - torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5]) # conf - return p_cls, xy * ng, wh - - else: # inference - io = p.sigmoid() - io[..., :2] = (io[..., :2] * 2. - 0.5 + self.grid) - io[..., 2:4] = (io[..., 2:4] * 2) ** 2 * self.anchor_wh - io[..., :4] *= self.stride - #io = p.clone() # inference output - #io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid # xy - #io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh # wh yolo method - #io[..., :4] *= self.stride - #torch.sigmoid_(io[..., 4:]) - return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85] - - -class JDELayer(nn.Module): - def __init__(self, anchors, nc, img_size, yolo_index, layers, stride): - super(JDELayer, self).__init__() - self.anchors = torch.Tensor(anchors) - self.index = yolo_index # index of this layer in layers - self.layers = layers # model output layer indices - self.stride = stride # layer stride - self.nl = len(layers) # number of output layers (3) - self.na = len(anchors) # number of anchors (3) - self.nc = nc # number of classes (80) - self.no = nc + 5 # number of outputs (85) - self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints - self.anchor_vec = self.anchors / self.stride - self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2) - - if ONNX_EXPORT: - self.training = False - self.create_grids((img_size[1] // stride, img_size[0] // stride)) # number x, y grid points - - def create_grids(self, ng=(13, 13), device='cpu'): - self.nx, self.ny = ng # x and y grid size - self.ng = torch.tensor(ng, dtype=torch.float) - - # build xy offsets - if not self.training: - yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)]) - self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float() - - if self.anchor_vec.device != device: - self.anchor_vec = self.anchor_vec.to(device) - self.anchor_wh = self.anchor_wh.to(device) - - def forward(self, p, out): - ASFF = False # https://arxiv.org/abs/1911.09516 - if ASFF: - i, n = self.index, self.nl # index in layers, number of layers - p = out[self.layers[i]] - bs, _, ny, nx = p.shape # bs, 255, 13, 13 - if (self.nx, self.ny) != (nx, ny): - self.create_grids((nx, ny), p.device) - - # outputs and weights - # w = F.softmax(p[:, -n:], 1) # normalized weights - w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster) - # w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension - - # weighted ASFF sum - p = out[self.layers[i]][:, :-n] * w[:, i:i + 1] - for j in range(n): - if j != i: - p += w[:, j:j + 1] * \ - F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False) - - elif ONNX_EXPORT: - bs = 1 # batch size - else: - bs, _, ny, nx = p.shape # bs, 255, 13, 13 - if (self.nx, self.ny) != (nx, ny): - self.create_grids((nx, ny), p.device) - - # p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh) - p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction - - if self.training: - return p - - elif ONNX_EXPORT: - # Avoid 
broadcasting for ANE operations - m = self.na * self.nx * self.ny - ng = 1. / self.ng.repeat(m, 1) - grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2) - anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng - - p = p.view(m, self.no) - xy = torch.sigmoid(p[:, 0:2]) + grid # x, y - wh = torch.exp(p[:, 2:4]) * anchor_wh # width, height - p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else \ - torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5]) # conf - return p_cls, xy * ng, wh - - else: # inference - #io = p.sigmoid() - #io[..., :2] = (io[..., :2] * 2. - 0.5 + self.grid) - #io[..., 2:4] = (io[..., 2:4] * 2) ** 2 * self.anchor_wh - #io[..., :4] *= self.stride - io = p.clone() # inference output - io[..., :2] = torch.sigmoid(io[..., :2]) * 2. - 0.5 + self.grid # xy - io[..., 2:4] = (torch.sigmoid(io[..., 2:4]) * 2) ** 2 * self.anchor_wh # wh yolo method - io[..., :4] *= self.stride - io[..., 4:] = F.softmax(io[..., 4:]) - return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85] - -class Darknet(nn.Module): - # YOLOv3 object detection model - - def __init__(self, cfg, img_size=(416, 416), verbose=False): - super(Darknet, self).__init__() - - self.module_defs = parse_model_cfg(cfg) - self.module_list, self.routs = create_modules(self.module_defs, img_size, cfg) - self.yolo_layers = get_yolo_layers(self) - # torch_utils.initialize_weights(self) - - # Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346 - self.version = np.array([0, 2, 5], dtype=np.int32) # (int32) version info: major, minor, revision - self.seen = np.array([0], dtype=np.int64) # (int64) number of images seen during training - self.info(verbose) if not ONNX_EXPORT else None # print model description - - def forward(self, x, augment=False, verbose=False): - - if not augment: - return self.forward_once(x) - else: # Augment images (inference and test only) https://github.com/ultralytics/yolov3/issues/931 - img_size = x.shape[-2:] # height, width - s = [0.83, 0.67] # scales - y = [] - for i, xi in enumerate((x, - torch_utils.scale_img(x.flip(3), s[0], same_shape=False), # flip-lr and scale - torch_utils.scale_img(x, s[1], same_shape=False), # scale - )): - # cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) - y.append(self.forward_once(xi)[0]) - - y[1][..., :4] /= s[0] # scale - y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr - y[2][..., :4] /= s[1] # scale - - # for i, yi in enumerate(y): # coco small, medium, large = < 32**2 < 96**2 < - # area = yi[..., 2:4].prod(2)[:, :, None] - # if i == 1: - # yi *= (area < 96. ** 2).float() - # elif i == 2: - # yi *= (area > 32. 
** 2).float() - # y[i] = yi - - y = torch.cat(y, 1) - return y, None - - def forward_once(self, x, augment=False, verbose=False): - img_size = x.shape[-2:] # height, width - yolo_out, out = [], [] - if verbose: - print('0', x.shape) - str = '' - - # Augment images (inference and test only) - if augment: # https://github.com/ultralytics/yolov3/issues/931 - nb = x.shape[0] # batch size - s = [0.83, 0.67] # scales - x = torch.cat((x, - torch_utils.scale_img(x.flip(3), s[0]), # flip-lr and scale - torch_utils.scale_img(x, s[1]), # scale - ), 0) - - for i, module in enumerate(self.module_list): - name = module.__class__.__name__ - #print(name) - if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l', 'ScaleChannel', 'ShiftChannel', 'ShiftChannel2D', 'ControlChannel', 'ControlChannel2D', 'AlternateChannel', 'AlternateChannel2D', 'SelectChannel', 'SelectChannel2D', 'ScaleSpatial']: # sum, concat - if verbose: - l = [i - 1] + module.layers # layers - sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes - str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)]) - x = module(x, out) # WeightedFeatureFusion(), FeatureConcat() - elif name in ['ImplicitA', 'ImplicitM', 'ImplicitC', 'Implicit2DA', 'Implicit2DM', 'Implicit2DC']: - x = module() - elif name == 'YOLOLayer': - yolo_out.append(module(x, out)) - elif name == 'JDELayer': - yolo_out.append(module(x, out)) - else: # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc. - #print(module) - #print(x.shape) - x = module(x) - - out.append(x if self.routs[i] else []) - if verbose: - print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), str) - str = '' - - if self.training: # train - return yolo_out - elif ONNX_EXPORT: # export - x = [torch.cat(x, 0) for x in zip(*yolo_out)] - return x[0], torch.cat(x[1:3], 1) # scores, boxes: 3780x80, 3780x4 - else: # inference or test - x, p = zip(*yolo_out) # inference output, training output - x = torch.cat(x, 1) # cat yolo outputs - if augment: # de-augment results - x = torch.split(x, nb, dim=0) - x[1][..., :4] /= s[0] # scale - x[1][..., 0] = img_size[1] - x[1][..., 0] # flip lr - x[2][..., :4] /= s[1] # scale - x = torch.cat(x, 1) - return x, p - - def fuse(self): - # Fuse Conv2d + BatchNorm2d layers throughout model - print('Fusing layers...') - fused_list = nn.ModuleList() - for a in list(self.children())[0]: - if isinstance(a, nn.Sequential): - for i, b in enumerate(a): - if isinstance(b, nn.modules.batchnorm.BatchNorm2d): - # fuse this bn layer with the previous conv2d layer - conv = a[i - 1] - fused = torch_utils.fuse_conv_and_bn(conv, b) - a = nn.Sequential(fused, *list(a.children())[i + 1:]) - break - fused_list.append(a) - self.module_list = fused_list - self.info() if not ONNX_EXPORT else None # yolov3-spp reduced from 225 to 152 layers - - def info(self, verbose=False): - torch_utils.model_info(self, verbose) - - -def get_yolo_layers(model): - return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ in ['YOLOLayer', 'JDELayer']] # [89, 101, 113] - - -def load_darknet_weights(self, weights, cutoff=-1): - # Parses and loads the weights stored in 'weights' - - # Establish cutoffs (load layers between 0 and cutoff. 
if cutoff = -1 all are loaded) - file = Path(weights).name - if file == 'darknet53.conv.74': - cutoff = 75 - elif file == 'yolov3-tiny.conv.15': - cutoff = 15 - - # Read weights file - with open(weights, 'rb') as f: - # Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346 - self.version = np.fromfile(f, dtype=np.int32, count=3) # (int32) version info: major, minor, revision - self.seen = np.fromfile(f, dtype=np.int64, count=1) # (int64) number of images seen during training - - weights = np.fromfile(f, dtype=np.float32) # the rest are weights - - ptr = 0 - for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])): - if mdef['type'] == 'convolutional': - conv = module[0] - if mdef['batch_normalize']: - # Load BN bias, weights, running mean and running variance - bn = module[1] - nb = bn.bias.numel() # number of biases - # Bias - bn.bias.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.bias)) - ptr += nb - # Weight - bn.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.weight)) - ptr += nb - # Running Mean - bn.running_mean.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_mean)) - ptr += nb - # Running Var - bn.running_var.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_var)) - ptr += nb - else: - # Load conv. bias - nb = conv.bias.numel() - conv_b = torch.from_numpy(weights[ptr:ptr + nb]).view_as(conv.bias) - conv.bias.data.copy_(conv_b) - ptr += nb - # Load conv. weights - nw = conv.weight.numel() # number of weights - conv.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nw]).view_as(conv.weight)) - ptr += nw - - -def save_weights(self, path='model.weights', cutoff=-1): - # Converts a PyTorch model to Darket format (*.pt to *.weights) - # Note: Does not work if model.fuse() is applied - with open(path, 'wb') as f: - # Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346 - self.version.tofile(f) # (int32) version info: major, minor, revision - self.seen.tofile(f) # (int64) number of images seen during training - - # Iterate through layers - for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])): - if mdef['type'] == 'convolutional': - conv_layer = module[0] - # If batch norm, load bn first - if mdef['batch_normalize']: - bn_layer = module[1] - bn_layer.bias.data.cpu().numpy().tofile(f) - bn_layer.weight.data.cpu().numpy().tofile(f) - bn_layer.running_mean.data.cpu().numpy().tofile(f) - bn_layer.running_var.data.cpu().numpy().tofile(f) - # Load conv bias - else: - conv_layer.bias.data.cpu().numpy().tofile(f) - # Load conv weights - conv_layer.weight.data.cpu().numpy().tofile(f) - - -def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights', saveto='converted.weights'): - # Converts between PyTorch and Darknet format per extension (i.e. 
*.weights convert to *.pt and vice versa) - # from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights') - - # Initialize model - model = Darknet(cfg) - ckpt = torch.load(weights) # load checkpoint - try: - ckpt['model'] = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()} - model.load_state_dict(ckpt['model'], strict=False) - save_weights(model, path=saveto, cutoff=-1) - except KeyError as e: - print(e) - -def attempt_download(weights): - # Attempt to download pretrained weights if not found locally - weights = weights.strip() - msg = weights + ' missing, try downloading from https://drive.google.com/open?id=1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0' - - if len(weights) > 0 and not os.path.isfile(weights): - d = {''} - - file = Path(weights).name - if file in d: - r = gdrive_download(id=d[file], name=weights) - else: # download from pjreddie.com - url = 'https://pjreddie.com/media/files/' + file - print('Downloading ' + url) - r = os.system('curl -f ' + url + ' -o ' + weights) - - # Error check - if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB - os.system('rm ' + weights) # remove partial downloads - raise Exception(msg) diff --git a/model_image/yolor/utils/__init__.py b/model_image/yolor/utils/__init__.py deleted file mode 100644 index 8b13789..0000000 --- a/model_image/yolor/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/model_image/yolor/utils/activations.py b/model_image/yolor/utils/activations.py deleted file mode 100644 index ba6b854..0000000 --- a/model_image/yolor/utils/activations.py +++ /dev/null @@ -1,72 +0,0 @@ -# Activation functions - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -# Swish https://arxiv.org/pdf/1905.02244.pdf --------------------------------------------------------------------------- -class Swish(nn.Module): # - @staticmethod - def forward(x): - return x * torch.sigmoid(x) - - -class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() - @staticmethod - def forward(x): - # return x * F.hardsigmoid(x) # for torchscript and CoreML - return x * F.hardtanh(x + 3, 0., 6.) / 6. 
# for torchscript, CoreML and ONNX - - -class MemoryEfficientSwish(nn.Module): - class F(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x * torch.sigmoid(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - return grad_output * (sx * (1 + x * (1 - sx))) - - def forward(self, x): - return self.F.apply(x) - - -# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- -class Mish(nn.Module): - @staticmethod - def forward(x): - return x * F.softplus(x).tanh() - - -class MemoryEfficientMish(nn.Module): - class F(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - fx = F.softplus(x).tanh() - return grad_output * (fx + x * sx * (1 - fx * fx)) - - def forward(self, x): - return self.F.apply(x) - - -# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- -class FReLU(nn.Module): - def __init__(self, c1, k=3): # ch_in, kernel - super().__init__() - self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1) - self.bn = nn.BatchNorm2d(c1) - - def forward(self, x): - return torch.max(x, self.bn(self.conv(x))) diff --git a/model_image/yolor/utils/autoanchor.py b/model_image/yolor/utils/autoanchor.py deleted file mode 100644 index 1e82492..0000000 --- a/model_image/yolor/utils/autoanchor.py +++ /dev/null @@ -1,152 +0,0 @@ -# Auto-anchor utils - -import numpy as np -import torch -import yaml -from scipy.cluster.vq import kmeans -from tqdm import tqdm - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchor_grid.prod(-1).view(-1) # anchor area - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # same order - print('Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - m.anchor_grid[:] = m.anchor_grid.flip(0) - - -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - print('\nAnalyzing anchors... ', end='') - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1. / thr).float().mean() # best possible recall - return bpr, aat - - bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) - print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='') - if bpr < 0.98: # threshold to recompute - print('. 
Attempting to improve anchors, please wait...') - na = m.anchor_grid.numel() // 2 # number of anchors - new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(new_anchors.reshape(-1, 2))[0] - if new_bpr > bpr: # replace anchors - new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference - m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss - check_anchor_order(m) - print('New anchors saved to model. Update model *.yaml to use these anchors in the future.') - else: - print('Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline - - -def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - path: path to dataset *.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.general import *; _ = kmean_anchors() - """ - thr = 1. / thr - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat)) - print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' % - (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='') - for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg - return k - - if isinstance(path, str): # *.yaml file - with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict - from utils.datasets import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - else: - dataset = path # dataset - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - print('WARNING: Extremely small objects found. ' - '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0))) - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - - # Kmeans calculation - print('Running kmeans for %g anchors on %g points...' 
% (n, len(wh))) - s = wh.std(0) # sigmas for whitening - k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - k *= s - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.tight_layout() - # fig.savefig('wh.png', dpi=200) - - # Evolve - npr = np.random - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f - if verbose: - print_results(k) - - return print_results(k) diff --git a/model_image/yolor/utils/datasets.py b/model_image/yolor/utils/datasets.py deleted file mode 100644 index 96aeb7d..0000000 --- a/model_image/yolor/utils/datasets.py +++ /dev/null @@ -1,1399 +0,0 @@ -# Dataset utils and dataloaders - -import glob -import math -import os -import random -import shutil -import time -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from threading import Thread - -import cv2 -import numpy as np -import torch -from PIL import Image, ExifTags -from torch.utils.data import Dataset - -from yolor.utils.general import xyxy2xywh, xywh2xyxy -from yolor.utils.torch_utils import torch_distributed_zero_first - -# Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', - 'tiff', 'dng'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', - 'm4v', 'wmv', 'mkv'] # acceptable video suffixes - -# Get orientation exif tag -for orientation in ExifTags.TAGS.keys(): - if ExifTags.TAGS[orientation] == 'Orientation': - break - - -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) - - -def exif_size(img): - # Returns exif-corrected PIL size - s = img.size # (width, height) - try: - rotation = dict(img._getexif().items())[orientation] - if rotation == 6: # rotation 270 - s = (s[1], s[0]) - elif rotation == 8: # rotation 90 - s = (s[1], s[0]) - except: - pass - - return s - - -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): - dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training - cache_images=cache, - single_cls=opt.single_cls, - stride=int(stride), - pad=pad, - rank=rank) - - batch_size = 
min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > - 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler( - dataset) if rank != -1 else None - dataloader = InfiniteDataLoader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader() - return dataloader, dataset - - -def create_dataloader9(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): - dataset = LoadImagesAndLabels9(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training - cache_images=cache, - single_cls=opt.single_cls, - stride=int(stride), - pad=pad, - rank=rank) - - batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > - 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler( - dataset) if rank != -1 else None - dataloader = InfiniteDataLoader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels9.collate_fn) # torch.utils.data.DataLoader() - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): - """ Dataloader that reuses workers - - Uses same syntax as vanilla DataLoader - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', - _RepeatSampler(self.batch_sampler)) - self.iterator = super().__iter__() - - def __len__(self): - return len(self.batch_sampler.sampler) - - def __iter__(self): - for i in range(len(self)): - yield next(self.iterator) - - -class _RepeatSampler(object): - """ Sampler that repeats forever - - Args: - sampler (Sampler) - """ - - def __init__(self, sampler): - self.sampler = sampler - - def __iter__(self): - while True: - yield from iter(self.sampler) - - -class LoadImages: # for inference - def __init__(self, path, img_size=640, auto_size=32): - p = str(Path(path)) # os-agnostic - p = os.path.abspath(p) # absolute path - if '*' in p: - files = sorted(glob.glob(p, recursive=True)) # glob - elif os.path.isdir(p): - files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir - elif os.path.isfile(p): - files = [p] # files - else: - raise Exception('ERROR: %s does not exist' % p) - - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] - ni, nv = len(images), len(videos) - - self.img_size = img_size - self.auto_size = auto_size - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'images' - if any(videos): - self.new_video(videos[0]) # new video - else: - self.cap = None - assert self.nf > 0, 'No images or videos found in %s. 
Supported formats are:\nimages: %s\nvideos: %s' % \ - (p, img_formats, vid_formats) - - def __iter__(self): - self.count = 0 - return self - - def __next__(self): - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - ret_val, img0 = self.cap.read() - if not ret_val: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - else: - path = self.files[self.count] - self.new_video(path) - ret_val, img0 = self.cap.read() - - self.frame += 1 - print('video %g/%g (%g/%g) %s: ' % (self.count + 1, - self.nf, self.frame, self.nframes, path), end='') - - else: - # Read image - self.count += 1 - img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - print('image %g/%g %s: ' % (self.count, self.nf, path), end='') - - # Padded resize - img = letterbox(img0, new_shape=self.img_size, - auto_size=self.auto_size)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return path, img, img0, self.cap - - def new_video(self, path): - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - def __len__(self): - return self.nf # number of files - - -class LoadWebcam: # for inference - def __init__(self, pipe='0', img_size=640): - self.img_size = img_size - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture object - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if cv2.waitKey(1) == ord('q'): # q to quit - self.cap.release() - cv2.destroyAllWindows() - raise StopIteration - - # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break - - # Print - assert ret_val, 'Camera Error %s' % self.pipe - img_path = 'webcam.jpg' - print('webcam %g: ' % self.count, end='') - - # Padded resize - img = letterbox(img0, new_shape=self.img_size)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return img_path, img, img0, None - - def __len__(self): - return 0 - - -class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640): - self.mode = 'images' - self.img_size = img_size - - if os.path.isfile(sources): - with open(sources, 'r') as f: - sources = [x.strip() - for x in f.read().splitlines() if len(x.strip())] - else: - sources = [sources] - - n = len(sources) - self.imgs = [None] * n - self.sources = sources - for i, s in enumerate(sources): - # Start the thread to read frames from the video stream - print('%g/%g: %s... 
' % (i + 1, n, s), end='') - cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) - assert cap.isOpened(), 'Failed to open %s' % s - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) % 100 - _, self.imgs[i] = cap.read() # guarantee first frame - thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(' success (%gx%g at %.2f FPS).' % (w, h, fps)) - thread.start() - print('') # newline - - # check for common shapes - s = np.stack([letterbox(x, new_shape=self.img_size)[ - 0].shape for x in self.imgs], 0) # inference shapes - # rect inference if all shapes equal - self.rect = np.unique(s, axis=0).shape[0] == 1 - if not self.rect: - print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') - - def update(self, index, cap): - # Read next stream frame in a daemon thread - n = 0 - while cap.isOpened(): - n += 1 - # _, self.imgs[index] = cap.read() - cap.grab() - if n == 4: # read every 4th frame - _, self.imgs[index] = cap.retrieve() - n = 0 - time.sleep(0.01) # wait time - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - img0 = self.imgs.copy() - if cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - - # Letterbox - img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] - for x in img0] - - # Stack - img = np.stack(img, 0) - - # Convert - # BGR to RGB, to bsx3x416x416 - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) - img = np.ascontiguousarray(img) - - return self.sources, img, img0, None - - def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years - - -class LoadImagesAndLabels(Dataset): # for training/testing - def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - # load 4 images at a time into a mosaic (only during training) - self.mosaic = self.augment and not self.rect - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = stride - - def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = os.sep + 'images' + os.sep, os.sep + \ - 'labels' + os.sep # /images/, /labels/ substrings - return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths] - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - elif p.is_file(): # file - with open(p, 'r') as t: - t = t.read().splitlines() - parent = str(p.parent) + os.sep - # local to global path - f += [x.replace('./', parent) - if x.startswith('./') else x for x in t] - else: - raise Exception('%s does not exist' % p) - self.img_files = sorted( - [x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) - assert self.img_files, 'No images found' - except Exception as e: - raise Exception('Error loading data from %s: %s\nSee %s' % - (path, e, help_url)) - - # Check cache - self.label_files = img2label_paths(self.img_files) # labels - # cached labels - cache_path = str(Path(self.label_files[0]).parent) + '.cache3' - if os.path.isfile(cache_path): - cache = torch.load(cache_path) # load 
- # dataset changed - if cache['hash'] != get_hash(self.label_files + self.img_files): - cache = self.cache_labels(cache_path) # re-cache - else: - cache = self.cache_labels(cache_path) # cache - - # Read cache - cache.pop('hash') # remove hash - labels, shapes = zip(*cache.values()) - self.labels = list(labels) - self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - - n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil( - np.array(shapes) * img_size / stride + pad).astype(np.int) * stride - - # Check labels - create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False - # number missing, found, empty, datasubset, duplicate - nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 - pbar = enumerate(self.label_files) - if rank in [-1, 0]: - pbar = tqdm(pbar) - for i, file in pbar: - l = self.labels[i] # label - if l is not None and l.shape[0]: - assert l.shape[1] == 5, '> 5 label columns: %s' % file - assert (l >= 0).all(), 'negative labels: %s' % file - assert (l[:, 1:] <= 1).all( - ), 'non-normalized or out of bounds coordinate labels: %s' % file - if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows - # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows - nd += 1 - if single_cls: - l[:, 0] = 0 # force dataset into single-class mode - self.labels[i] = l - nf += 1 # file found - - # Create subdataset (a smaller dataset) - if create_datasubset and ns < 1E4: - if ns == 0: - create_folder(path='./datasubset') - os.makedirs('./datasubset/images') - exclude_classes = 43 - if exclude_classes not in l[:, 0]: - ns += 1 - # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image - with open('./datasubset/images.txt', 'a') as f: - f.write(self.img_files[i] + '\n') - - # Extract object detection boxes for a second stage classifier - if extract_bounding_boxes: - p = Path(self.img_files[i]) - img = cv2.imread(str(p)) - h, w = img.shape[:2] - for j, x in enumerate(l): - f = '%s%sclassifier%s%g_%g_%s' % ( - p.parent.parent, os.sep, os.sep, x[0], j, p.name) - if not os.path.exists(Path(f).parent): - # make new output folder - os.makedirs(Path(f).parent) - - b = x[1:] * [w, h, w, h] # box - b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.3 + 30 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) - - # clip boxes outside of image - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite( - f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes' - else: - # print('empty labels for image %s' % self.img_files[i]) # file empty - ne += 1 - # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove - - if rank in 
[-1, 0]: - pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % ( - cache_path, nf, nm, ne, nd, n) - if nf == 0: - s = 'WARNING: No labels found in %s. See %s' % ( - os.path.dirname(file) + os.sep, help_url) - print(s) - assert not augment, '%s. Can not train without labels.' % s - - # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n - if cache_images: - gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image( - *x), zip(repeat(self), range(n))) # 8 threads - pbar = tqdm(enumerate(results), total=n) - for i, x in pbar: - # img, hw_original, hw_resized = load_image(self, i) - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x - gb += self.imgs[i].nbytes - pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) - - def cache_labels(self, path='labels.cache3'): - # Cache dataset labels, check images and read shapes - x = {} # dict - pbar = tqdm(zip(self.img_files, self.label_files), - desc='Scanning images', total=len(self.img_files)) - for (img, label) in pbar: - try: - l = [] - im = Image.open(img) - im.verify() # PIL verify - shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' - if os.path.isfile(label): - with open(label, 'r') as f: - l = np.array( - [x.split() for x in f.read().splitlines()], dtype=np.float32) # labels - if len(l) == 0: - l = np.zeros((0, 5), dtype=np.float32) - x[img] = [l, shape] - except Exception as e: - print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e)) - - x['hash'] = get_hash(self.label_files + self.img_files) - torch.save(x, path) # save for next time - return x - - def __len__(self): - return len(self.img_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - if self.image_weights: - index = self.indices[index] - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - img, labels = load_mosaic(self, index) - # img, labels = load_mosaic9(self, index) - shapes = None - - # MixUp https://arxiv.org/pdf/1710.09412.pdf - if random.random() < hyp['mixup']: - img2, labels2 = load_mosaic( - self, random.randint(0, len(self.labels) - 1)) - # img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - - else: - # Load image - img, (h0, w0), (h, w) = load_image(self, index) - - # Letterbox - # final letterboxed shape - shape = self.batch_shapes[self.batch[index] - ] if self.rect else self.img_size - img, ratio, pad = letterbox( - img, shape, auto=False, scaleup=self.augment) - # for COCO mAP rescaling - shapes = (h0, w0), ((h / h0, w / w0), pad) - - # Load labels - labels = [] - x = self.labels[index] - if x.size > 0: - # Normalized xywh to pixel xyxy format - labels = x.copy() - labels[:, 1] = ratio[0] * w * \ - (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width - labels[:, 2] = ratio[1] * h * \ - (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height - labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] - labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] - - if self.augment: - # Augment imagespace - if 
not mosaic: - img, labels = random_perspective(img, labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], - sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - nL = len(labels) # number of labels - if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 - - if self.augment: - # flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nL: - labels[:, 2] = 1 - labels[:, 2] - - # flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nL: - labels[:, 1] = 1 - labels[:, 1] - - labels_out = torch.zeros((nL, 6)) - if nL: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.img_files[index], shapes - - @staticmethod - def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes - - -class LoadImagesAndLabels9(Dataset): # for training/testing - def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - # load 4 images at a time into a mosaic (only during training) - self.mosaic = self.augment and not self.rect - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = stride - - def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = os.sep + 'images' + os.sep, os.sep + \ - 'labels' + os.sep # /images/, /labels/ substrings - return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths] - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - elif p.is_file(): # file - with open(p, 'r') as t: - t = t.read().splitlines() - parent = str(p.parent) + os.sep - # local to global path - f += [x.replace('./', parent) - if x.startswith('./') else x for x in t] - else: - raise Exception('%s does not exist' % p) - self.img_files = sorted( - [x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) - assert self.img_files, 'No images found' - except Exception as e: - raise Exception('Error loading data from %s: %s\nSee %s' % - (path, e, help_url)) - - # Check cache - self.label_files = img2label_paths(self.img_files) # labels - # cached labels - cache_path = str(Path(self.label_files[0]).parent) + '.cache3' - if os.path.isfile(cache_path): - cache = torch.load(cache_path) # load - # dataset changed - if cache['hash'] != get_hash(self.label_files + self.img_files): - cache = self.cache_labels(cache_path) # re-cache - else: - cache = self.cache_labels(cache_path) # cache - - # Read cache - cache.pop('hash') # remove hash - labels, shapes = zip(*cache.values()) - self.labels = 
list(labels) - self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - - n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil( - np.array(shapes) * img_size / stride + pad).astype(np.int) * stride - - # Check labels - create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False - # number missing, found, empty, datasubset, duplicate - nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 - pbar = enumerate(self.label_files) - if rank in [-1, 0]: - pbar = tqdm(pbar) - for i, file in pbar: - l = self.labels[i] # label - if l is not None and l.shape[0]: - assert l.shape[1] == 5, '> 5 label columns: %s' % file - assert (l >= 0).all(), 'negative labels: %s' % file - assert (l[:, 1:] <= 1).all( - ), 'non-normalized or out of bounds coordinate labels: %s' % file - if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows - # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows - nd += 1 - if single_cls: - l[:, 0] = 0 # force dataset into single-class mode - self.labels[i] = l - nf += 1 # file found - - # Create subdataset (a smaller dataset) - if create_datasubset and ns < 1E4: - if ns == 0: - create_folder(path='./datasubset') - os.makedirs('./datasubset/images') - exclude_classes = 43 - if exclude_classes not in l[:, 0]: - ns += 1 - # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image - with open('./datasubset/images.txt', 'a') as f: - f.write(self.img_files[i] + '\n') - - # Extract object detection boxes for a second stage classifier - if extract_bounding_boxes: - p = Path(self.img_files[i]) - img = cv2.imread(str(p)) - h, w = img.shape[:2] - for j, x in enumerate(l): - f = '%s%sclassifier%s%g_%g_%s' % ( - p.parent.parent, os.sep, os.sep, x[0], j, p.name) - if not os.path.exists(Path(f).parent): - # make new output folder - os.makedirs(Path(f).parent) - - b = x[1:] * [w, h, w, h] # box - b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.3 + 30 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) - - # clip boxes outside of image - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite( - f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes' - else: - # print('empty labels for image %s' % self.img_files[i]) # file empty - ne += 1 - # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove - - if rank in [-1, 0]: - pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % ( - cache_path, nf, nm, ne, nd, n) - if nf == 0: - s = 'WARNING: No labels found in %s. See %s' % ( - os.path.dirname(file) + os.sep, help_url) - print(s) - assert not augment, '%s. 
Can not train without labels.' % s - - # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n - if cache_images: - gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image( - *x), zip(repeat(self), range(n))) # 8 threads - pbar = tqdm(enumerate(results), total=n) - for i, x in pbar: - # img, hw_original, hw_resized = load_image(self, i) - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x - gb += self.imgs[i].nbytes - pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) - - def cache_labels(self, path='labels.cache3'): - # Cache dataset labels, check images and read shapes - x = {} # dict - pbar = tqdm(zip(self.img_files, self.label_files), - desc='Scanning images', total=len(self.img_files)) - for (img, label) in pbar: - try: - l = [] - im = Image.open(img) - im.verify() # PIL verify - shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' - if os.path.isfile(label): - with open(label, 'r') as f: - l = np.array( - [x.split() for x in f.read().splitlines()], dtype=np.float32) # labels - if len(l) == 0: - l = np.zeros((0, 5), dtype=np.float32) - x[img] = [l, shape] - except Exception as e: - print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e)) - - x['hash'] = get_hash(self.label_files + self.img_files) - torch.save(x, path) # save for next time - return x - - def __len__(self): - return len(self.img_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - if self.image_weights: - index = self.indices[index] - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - # img, labels = load_mosaic(self, index) - img, labels = load_mosaic9(self, index) - shapes = None - - # MixUp https://arxiv.org/pdf/1710.09412.pdf - if random.random() < hyp['mixup']: - # img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1)) - img2, labels2 = load_mosaic9( - self, random.randint(0, len(self.labels) - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - - else: - # Load image - img, (h0, w0), (h, w) = load_image(self, index) - - # Letterbox - # final letterboxed shape - shape = self.batch_shapes[self.batch[index] - ] if self.rect else self.img_size - img, ratio, pad = letterbox( - img, shape, auto=False, scaleup=self.augment) - # for COCO mAP rescaling - shapes = (h0, w0), ((h / h0, w / w0), pad) - - # Load labels - labels = [] - x = self.labels[index] - if x.size > 0: - # Normalized xywh to pixel xyxy format - labels = x.copy() - labels[:, 1] = ratio[0] * w * \ - (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width - labels[:, 2] = ratio[1] * h * \ - (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height - labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] - labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] - - if self.augment: - # Augment imagespace - if not mosaic: - img, labels = random_perspective(img, labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], - sgain=hyp['hsv_s'], 
vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - nL = len(labels) # number of labels - if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 - - if self.augment: - # flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nL: - labels[:, 2] = 1 - labels[:, 2] - - # flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nL: - labels[:, 1] = 1 - labels[:, 1] - - labels_out = torch.zeros((nL, 6)) - if nL: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.img_files[index], shapes - - @staticmethod - def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes - - -# Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # resize image to img_size - if r != 1: # always resize down, only resize up if training with augmentation - interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=interp) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized - else: - # img, hw_original, hw_resized - return self.imgs[index], self.img_hw0[index], self.img_hw[index] - - -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=np.int16) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT( - sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - # Histogram equalization - # if random.random() < 0.2: - # for i in range(3): - # img[:, :, i] = cv2.equalizeHist(img[:, :, i]) - - -def load_mosaic(self, index): - # loads images in a mosaic - - labels4 = [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) - for x in self.mosaic_border] # mosaic center x, y - # 3 additional image indices - indices = [index] + \ - [random.randint(0, len(self.labels) - 1) for _ in range(3)] - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - # base image with 4 tiles - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) - # xmin, ymin, xmax, ymax (large image) - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc - # xmin, ymin, xmax, ymax (small image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h - 
elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - # img4[ymin:ymax, xmin:xmax] - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - x = self.labels[index] - labels = x.copy() - if x.size > 0: # Normalized xywh to pixel xyxy format - labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw - labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh - labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw - labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh - labels4.append(labels) - - # Concat/clip labels - if len(labels4): - labels4 = np.concatenate(labels4, 0) - # use with random_perspective - np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4 = random_perspective(img4, labels4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - -def load_mosaic9(self, index): - # loads images in a 9-mosaic - - labels9 = [] - s = self.img_size - # 8 additional image indices - indices = [index] + \ - [random.randint(0, len(self.labels) - 1) for _ in range(8)] - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img9 - if i == 0: # center - # base image with 4 tiles - img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords - - # Labels - x = self.labels[index] - labels = x.copy() - if x.size > 0: # Normalized xywh to pixel xyxy format - labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx - labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady - labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx - labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady - labels9.append(labels) - - # Image - # img9[ymin:ymax, xmin:xmax] - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = [int(random.uniform(0, s)) - for x in self.mosaic_border] # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - if len(labels9): - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - - # use with random_perspective - np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - 
img9, labels9 = random_perspective(img9, labels9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh) - ), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - # img4[ymin:ymax, xmin:xmax] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] - labels = np.append( - labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, auto_size=32): - # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - \ - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, auto_size), np.mod(dh, auto_size) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / \ - shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder( - img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - # x perspective (about y) - P[2, 0] = random.uniform(-perspective, perspective) - # y perspective (about x) - P[2, 1] = random.uniform(-perspective, perspective) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * - math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * - 
math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + - translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + - translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=( - width, height), borderValue=(114, 114, 114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=( - width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - # warp points - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape( - n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - if perspective: - xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale - else: # affine - xy = xy[:, :2].reshape(n, 8) - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - xy = np.concatenate( - (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # # apply angle-based reduction of bounding boxes - # radians = a * math.pi / 180 - # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 - # x = (xy[:, 2] + xy[:, 0]) / 2 - # y = (xy[:, 3] + xy[:, 1]) / 2 - # w = (xy[:, 2] - xy[:, 0]) * reduction - # h = (xy[:, 3] - xy[:, 1]) * reduction - # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T - - # clip boxes - xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) - xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) - targets = targets[i] - targets[:, 1:5] = xy[i] - - return img, targets - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio - # candidates - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + \ - [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [ - random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def create_folder(path='./new'): - # Create folder - if os.path.exists(path): - shutil.rmtree(path) # delete output folder - os.makedirs(path) # make new output folder - - -def flatten_recursive(path='../coco128'): - # Flatten a recursive directory by bringing all files to top level - new_path = Path(path + '_flat') - create_folder(new_path) - for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): - shutil.copyfile(file, new_path / Path(file).name) diff --git a/model_image/yolor/utils/general.py b/model_image/yolor/utils/general.py deleted file mode 100644 index cd5eefd..0000000 --- a/model_image/yolor/utils/general.py +++ /dev/null @@ -1,480 +0,0 @@ -# General utils - -import glob -import logging -import math -import os -import platform -import random -import re -import subprocess -import time -from pathlib import Path - -import cv2 -import numpy as np -import torch -import yaml - -from yolor.utils.google_utils import gsutil_getsize -from yolor.utils.metrics import fitness -from yolor.utils.torch_utils import init_torch_seeds - -# Set printoptions -torch.set_printoptions(linewidth=320, precision=5, profile='long') -# format short g, %precision=5 -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) - -# Prevent OpenCV from multithreading (to use PyTorch DataLoader) -cv2.setNumThreads(0) - - -def set_logging(rank=-1): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if rank in [-1, 0] else logging.WARN) - - -def init_seeds(seed=0): - random.seed(seed) - np.random.seed(seed) - init_torch_seeds(seed) - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def check_git_status(): - # Suggest 'git pull' if repo is out of date - if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'): - s = subprocess.check_output( - 'if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8') - if 'Your branch is behind' in s: - print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n') - - -def check_img_size(img_size, s=32): - # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple - if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % - (img_size, s, new_size)) - return new_size - - -def check_file(file): - # Search for file if not found - if os.path.isfile(file) or file == '': - return file - else: - files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), 'File Not Found: %s' % file # assert file was found - assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % ( - file, files) # assert unique - return files[0] # return file - - -def check_dataset(dict): - # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') - if val and len(val): - val = [Path(x).resolve() - for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % - [str(x) for x in val if not x.exists()]) - if s and len(s): # download script - print('Downloading %s ...' % s) - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - torch.hub.download_url_to_file(s, f) - r = os.system('unzip -q %s -d ../ && rm %s' % - (f, f)) # unzip - else: # bash script - r = os.system(s) - print('Dataset autodownload %s\n' % ('success' if r == - 0 else 'failure')) # analyze return value - else: - raise Exception('Dataset not found.') - - -def make_divisible(x, divisor): - # Returns x evenly divisible by divisor - return math.ceil(x / divisor) * divisor - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(np.int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights) - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class mAPs - n = len(labels) - class_counts = np.array( - [np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)]) - image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) - # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample - return image_weights - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # 
https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - return x - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - return y - - -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], - img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / \ - 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords - - -def clip_coords(boxes, img_shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - - -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, ECIoU=False, eps=1e-9): - # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU or EIoU or ECIoU: - # convex (smallest enclosing box) width - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU or EIoU or ECIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * \ - torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - return iou - (rho2 / c2 + v * alpha) # CIoU - elif EIoU: # Efficient IoU https://arxiv.org/abs/2101.08158 - rho3 = (w1-w2) ** 2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) ** 2 - c4 = ch ** 2 + eps - return iou - rho2 / c2 - rho3 / c3 - rho4 / c4 # EIoU - elif ECIoU: - v = (4 / math.pi ** 2) * \ - torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) - rho3 = (w1-w2) ** 2 - c3 = cw ** 2 + eps - rho4 = (h1-h2) ** 2 - c4 = ch ** 2 + eps - return iou - v * alpha - rho2 / c2 - rho3 / c3 - rho4 / c4 # ECIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - # iou = inter / (area1 + area2 - inter) - return inter / (area1[:, None] + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - # iou = inter / (area1 + area2 - inter) - return inter / (wh1.prod(2) + wh2.prod(2) - inter) - - -def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False): - """Performs Non-Maximum Suppression (NMS) on inference results - - Returns: - detections with shape: nx6 (x1, y1, x2, y2, conf, cls) - """ - - nc = prediction[0].shape[1] - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Settings - # (pixels) minimum and maximum box width and height - min_wh, max_wh = 2, 4096 - max_det = 300 # maximum number of detections per image - time_limit = 10.0 # seconds to quit after - redundant = True # require redundant detections - multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) - - t = time.time() - output = [torch.zeros(0, 6)] * prediction.shape[0] - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box (center x, center y, width, height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) - else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[ - conf.view(-1) > conf_thres] - - # Filter by class - if classes: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # If none remain process next image - n = x.shape[0] # number of boxes - if not n: - continue - - # Sort by confidence - # x = x[x[:, 4].argsort(descending=True)] - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - # boxes (offset by class), scores - boxes, scores = x[:, :4] + c, x[:, 4] - i = torch.ops.torchvision.nms(boxes, scores, iou_thres) - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float( - ) / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if (time.time() - t) > time_limit: - break # time limit exceeded - - return output - - -# from utils.general import *; strip_optimizer() -def strip_optimizer(f='weights/best.pt', s=''): - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - x['optimizer'] = None - x['training_results'] = None - x['epoch'] = -1 - # x['model'].half() # to FP16 - # for p in x['model'].parameters(): - # p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - print('Optimizer stripped from %s,%s %.1fMB' % - (f, (' saved as %s,' % s) if s else '', mb)) - - -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - 
# Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - c = '%10.4g' * len(results) % results - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) - - if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - # download evolve.txt if larger than local - os.system('gsutil cp %s .' % url) - - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), - axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness - - # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - c = '%10.4g' * len(results) % results - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len( - x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) - - if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % - (yaml_file, bucket)) # upload - - -def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for j, a in enumerate(d): # per item - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, cutout) - - # BGR to RGB, to 3x416x416 - im = im[:, :, ::-1].transpose(2, 0, 1) - im = np.ascontiguousarray( - im, dtype=np.float32) # uint8 to float32 - im /= 255.0 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device) - ).argmax(1) # classifier prediction - # retain matching class detections - x[i] = x[i][pred_cls1 == pred_cls2] - - return x - - -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
- path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): - return str(path) - else: - dirs = glob.glob(f"{path}{sep}*") # similar paths - matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] - i = [int(m.groups()[0]) for m in matches if m] # indices - n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path diff --git a/model_image/yolor/utils/google_utils.py b/model_image/yolor/utils/google_utils.py deleted file mode 100644 index 0ff8bbd..0000000 --- a/model_image/yolor/utils/google_utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries - -import os -import platform -import subprocess -import time -from pathlib import Path - -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(weights): - # Attempt to download pretrained weights if not found locally - weights = weights.strip().replace("'", '') - file = Path(weights).name - - msg = weights + ' missing, try downloading from https://github.com/WongKinYiu/yolor/releases/' - models = ['yolor_p6.pt', 'yolor_w6.pt'] # available models - - if file in models and not os.path.isfile(weights): - - try: # GitHub - url = 'https://github.com/WongKinYiu/yolor/releases/download/v1.0/' + file - print('Downloading %s to %s...' % (url, weights)) - torch.hub.download_url_to_file(url, weights) - assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check - except Exception as e: # GCP - print('ERROR: Download failure.') - print('') - - -def attempt_load(weights, map_location=None): - # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a - model = Ensemble() - for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model - - if len(model) == 1: - return model[-1] # return model - else: - print('Ensemble created with %s\n' % weights) - for k in ['names', 'stride']: - setattr(model, k, getattr(model[-1], k)) - return model # return ensemble - - -def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'): - # Downloads a file from Google Drive. from utils.google_utils import *; gdrive_download() - t = time.time() - - print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... 
' % (id, name), end='') - os.remove(name) if os.path.exists(name) else None # remove existing - os.remove('cookie') if os.path.exists('cookie') else None - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out)) - if os.path.exists('cookie'): # large file - s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name) - else: # small file - s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id) - r = os.system(s) # execute, capture return - os.remove('cookie') if os.path.exists('cookie') else None - - # Error check - if r != 0: - os.remove(name) if os.path.exists(name) else None # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if name.endswith('.zip'): - print('unzipping... ', end='') - os.system('unzip -q %s' % name) # unzip - os.remove(name) # remove zip to free space - - print('Done (%.1fs)' % (time.time() - t)) - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/model_image/yolor/utils/layers.py b/model_image/yolor/utils/layers.py deleted file mode 100644 index 123fb53..0000000 --- a/model_image/yolor/utils/layers.py +++ /dev/null @@ -1,547 +0,0 @@ -import torch.nn.functional as F - -from yolor.utils.general import * - -import torch -from torch import nn - - -class Mish(nn.Module): # https://github.com/digantamisra98/Mish - def forward(self, x): - return x * F.softplus(x).tanh() - - -class DWT(nn.Module): - def forward(self, x): - return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) - - -class Reorg(nn.Module): - def forward(self, x): - return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) - - -def make_divisible(v, divisor): - # Function ensures all layers have a channel number that is divisible by 8 - # https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py - return math.ceil(v / divisor) * divisor - - -class Flatten(nn.Module): - # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions - def forward(self, x): - return x.view(x.size(0), -1) - - -class Concat(nn.Module): - # Concatenate a list of tensors along dimension - def __init__(self, dimension=1): - super(Concat, self).__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class FeatureConcat(nn.Module): - def 
__init__(self, layers): - super(FeatureConcat, self).__init__() - self.layers = layers # layer indices - self.multiple = len(layers) > 1 # multiple layers flag - - def forward(self, x, outputs): - return torch.cat([outputs[i] for i in self.layers], 1) if self.multiple else outputs[self.layers[0]] - - -class FeatureConcat2(nn.Module): - def __init__(self, layers): - super(FeatureConcat2, self).__init__() - self.layers = layers # layer indices - self.multiple = len(layers) > 1 # multiple layers flag - - def forward(self, x, outputs): - return torch.cat([outputs[self.layers[0]], outputs[self.layers[1]].detach()], 1) - - -class FeatureConcat3(nn.Module): - def __init__(self, layers): - super(FeatureConcat3, self).__init__() - self.layers = layers # layer indices - self.multiple = len(layers) > 1 # multiple layers flag - - def forward(self, x, outputs): - return torch.cat([outputs[self.layers[0]], outputs[self.layers[1]].detach(), outputs[self.layers[2]].detach()], 1) - - -class FeatureConcat_l(nn.Module): - def __init__(self, layers): - super(FeatureConcat_l, self).__init__() - self.layers = layers # layer indices - self.multiple = len(layers) > 1 # multiple layers flag - - def forward(self, x, outputs): - return torch.cat([outputs[i][:, :outputs[i].shape[1]//2, :, :] for i in self.layers], 1) if self.multiple else outputs[self.layers[0]][:, :outputs[self.layers[0]].shape[1]//2, :, :] - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class WeightedFeatureFusion(nn.Module): - def __init__(self, layers, weight=False): - super(WeightedFeatureFusion, self).__init__() - self.layers = layers # layer indices - self.weight = weight # apply weights boolean - self.n = len(layers) + 1 # number of layers - if weight: - self.w = nn.Parameter(torch.zeros( - self.n), requires_grad=True) # layer weights - - def forward(self, x, outputs): - # Weights - if self.weight: - w = torch.sigmoid(self.w) * (2 / self.n) # sigmoid weights (0-1) - x = x * w[0] - - # Fusion - nx = x.shape[1] # input channels - for i in range(self.n - 1): - # feature to add - a = outputs[self.layers[i]] * \ - w[i + 1] if self.weight else outputs[self.layers[i]] - na = a.shape[1] # feature channels - - # Adjust channels - if nx == na: # same shape - x = x + a - elif nx > na: # slice input - # or a = nn.ZeroPad2d((0, 0, 0, 0, 0, dc))(a); x = x + a - x[:, :na] = x[:, :na] + a - else: # slice feature - x = x + a[:, :nx] - - return x - - -# MixConv: Mixed Depthwise Convolutional Kernels https://arxiv.org/abs/1907.09595 -class MixConv2d(nn.Module): - def __init__(self, in_ch, out_ch, k=(3, 5, 7), stride=1, dilation=1, bias=True, method='equal_params'): - super(MixConv2d, self).__init__() - - groups = len(k) - if method == 'equal_ch': # equal channels per group - # out_ch indices - i = torch.linspace(0, groups - 1E-6, out_ch).floor() - ch = [(i == g).sum() for g in range(groups)] - else: # 'equal_params': equal parameter count per group - b = [out_ch] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - ch = np.linalg.lstsq(a, b, rcond=None)[0].round().astype( - int) # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(in_channels=in_ch, - out_channels=ch[g], - kernel_size=k[g], - stride=stride, - padding=k[g] // 2, # 'same' pad - dilation=dilation, - bias=bias) for g in range(groups)]) - - def forward(self, x): - return torch.cat([m(x) for m in self.m], 1) - - -# Activation functions below 
------------------------------------------------------------------------------------------- -class SwishImplementation(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x * torch.sigmoid(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) # sigmoid(ctx) - return grad_output * (sx * (1 + x * (1 - sx))) - - -class MishImplementation(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - fx = F.softplus(x).tanh() - return grad_output * (fx + x * sx * (1 - fx * fx)) - - -class MemoryEfficientSwish(nn.Module): - def forward(self, x): - return SwishImplementation.apply(x) - - -class MemoryEfficientMish(nn.Module): - def forward(self, x): - return MishImplementation.apply(x) - - -class Swish(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class HardSwish(nn.Module): # https://arxiv.org/pdf/1905.02244.pdf - def forward(self, x): - return x * F.hardtanh(x + 3, 0., 6., True) / 6. - - -class DeformConv2d(nn.Module): - def __init__(self, inc, outc, kernel_size=3, padding=1, stride=1, bias=None, modulation=False): - """ - Args: - modulation (bool, optional): If True, Modulated Defomable Convolution (Deformable ConvNets v2). - """ - super(DeformConv2d, self).__init__() - self.kernel_size = kernel_size - self.padding = padding - self.stride = stride - self.zero_padding = nn.ZeroPad2d(padding) - self.conv = nn.Conv2d( - inc, outc, kernel_size=kernel_size, stride=kernel_size, bias=bias) - - self.p_conv = nn.Conv2d( - inc, 2*kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride) - nn.init.constant_(self.p_conv.weight, 0) - self.p_conv.register_backward_hook(self._set_lr) - - self.modulation = modulation - if modulation: - self.m_conv = nn.Conv2d( - inc, kernel_size*kernel_size, kernel_size=3, padding=1, stride=stride) - nn.init.constant_(self.m_conv.weight, 0) - self.m_conv.register_backward_hook(self._set_lr) - - @staticmethod - def _set_lr(module, grad_input, grad_output): - grad_input = (grad_input[i] * 0.1 for i in range(len(grad_input))) - grad_output = (grad_output[i] * 0.1 for i in range(len(grad_output))) - - def forward(self, x): - offset = self.p_conv(x) - if self.modulation: - m = torch.sigmoid(self.m_conv(x)) - - dtype = offset.data.type() - ks = self.kernel_size - N = offset.size(1) // 2 - - if self.padding: - x = self.zero_padding(x) - - # (b, 2N, h, w) - p = self._get_p(offset, dtype) - - # (b, h, w, 2N) - p = p.contiguous().permute(0, 2, 3, 1) - q_lt = p.detach().floor() - q_rb = q_lt + 1 - - q_lt = torch.cat([torch.clamp(q_lt[..., :N], 0, x.size( - 2)-1), torch.clamp(q_lt[..., N:], 0, x.size(3)-1)], dim=-1).long() - q_rb = torch.cat([torch.clamp(q_rb[..., :N], 0, x.size( - 2)-1), torch.clamp(q_rb[..., N:], 0, x.size(3)-1)], dim=-1).long() - q_lb = torch.cat([q_lt[..., :N], q_rb[..., N:]], dim=-1) - q_rt = torch.cat([q_rb[..., :N], q_lt[..., N:]], dim=-1) - - # clip p - p = torch.cat([torch.clamp(p[..., :N], 0, x.size(2)-1), - torch.clamp(p[..., N:], 0, x.size(3)-1)], dim=-1) - - # bilinear kernel (b, h, w, N) - g_lt = (1 + (q_lt[..., :N].type_as(p) - p[..., :N])) * \ - (1 + (q_lt[..., N:].type_as(p) - p[..., N:])) - g_rb = (1 - (q_rb[..., :N].type_as(p) - p[..., :N])) * \ - (1 - (q_rb[..., N:].type_as(p) - p[..., N:])) - g_lb = (1 + (q_lb[..., 
:N].type_as(p) - p[..., :N])) * \ - (1 - (q_lb[..., N:].type_as(p) - p[..., N:])) - g_rt = (1 - (q_rt[..., :N].type_as(p) - p[..., :N])) * \ - (1 + (q_rt[..., N:].type_as(p) - p[..., N:])) - - # (b, c, h, w, N) - x_q_lt = self._get_x_q(x, q_lt, N) - x_q_rb = self._get_x_q(x, q_rb, N) - x_q_lb = self._get_x_q(x, q_lb, N) - x_q_rt = self._get_x_q(x, q_rt, N) - - # (b, c, h, w, N) - x_offset = g_lt.unsqueeze(dim=1) * x_q_lt + \ - g_rb.unsqueeze(dim=1) * x_q_rb + \ - g_lb.unsqueeze(dim=1) * x_q_lb + \ - g_rt.unsqueeze(dim=1) * x_q_rt - - # modulation - if self.modulation: - m = m.contiguous().permute(0, 2, 3, 1) - m = m.unsqueeze(dim=1) - m = torch.cat([m for _ in range(x_offset.size(1))], dim=1) - x_offset *= m - - x_offset = self._reshape_x_offset(x_offset, ks) - out = self.conv(x_offset) - - return out - - def _get_p_n(self, N, dtype): - p_n_x, p_n_y = torch.meshgrid( - torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1), - torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1)) - # (2N, 1) - p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0) - p_n = p_n.view(1, 2*N, 1, 1).type(dtype) - - return p_n - - def _get_p_0(self, h, w, N, dtype): - p_0_x, p_0_y = torch.meshgrid( - torch.arange(1, h*self.stride+1, self.stride), - torch.arange(1, w*self.stride+1, self.stride)) - p_0_x = torch.flatten(p_0_x).view(1, 1, h, w).repeat(1, N, 1, 1) - p_0_y = torch.flatten(p_0_y).view(1, 1, h, w).repeat(1, N, 1, 1) - p_0 = torch.cat([p_0_x, p_0_y], 1).type(dtype) - - return p_0 - - def _get_p(self, offset, dtype): - N, h, w = offset.size(1)//2, offset.size(2), offset.size(3) - - # (1, 2N, 1, 1) - p_n = self._get_p_n(N, dtype) - # (1, 2N, h, w) - p_0 = self._get_p_0(h, w, N, dtype) - p = p_0 + p_n + offset - return p - - def _get_x_q(self, x, q, N): - b, h, w, _ = q.size() - padded_w = x.size(3) - c = x.size(1) - # (b, c, h*w) - x = x.contiguous().view(b, c, -1) - - # (b, h, w, N) - index = q[..., :N]*padded_w + q[..., N:] # offset_x*w + offset_y - # (b, c, h*w*N) - index = index.contiguous().unsqueeze(dim=1).expand(-1, c, - - 1, -1, -1).contiguous().view(b, c, -1) - - x_offset = x.gather( - dim=-1, index=index).contiguous().view(b, c, h, w, N) - - return x_offset - - @staticmethod - def _reshape_x_offset(x_offset, ks): - b, c, h, w, N = x_offset.size() - x_offset = torch.cat( - [x_offset[..., s:s+ks].contiguous().view(b, c, h, w*ks) for s in range(0, N, ks)], dim=-1) - x_offset = x_offset.contiguous().view(b, c, h*ks, w*ks) - - return x_offset - - -class GAP(nn.Module): - def __init__(self): - super(GAP, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - - def forward(self, x): - # b, c, _, _ = x.size() - return self.avg_pool(x) # .view(b, c) - - -class Silence(nn.Module): - def __init__(self): - super(Silence, self).__init__() - - def forward(self, x): - return x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class ScaleChannel(nn.Module): - def __init__(self, layers): - super(ScaleChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return x.expand_as(a) * a - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class ShiftChannel(nn.Module): - def __init__(self, layers): - super(ShiftChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return a.expand_as(x) + x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class 
ShiftChannel2D(nn.Module): - def __init__(self, layers): - super(ShiftChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1, -1, 1, 1) - return a.expand_as(x) + x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class ControlChannel(nn.Module): - def __init__(self, layers): - super(ControlChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return a.expand_as(x) * x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class ControlChannel2D(nn.Module): - def __init__(self, layers): - super(ControlChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1, -1, 1, 1) - return a.expand_as(x) * x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class AlternateChannel(nn.Module): - def __init__(self, layers): - super(AlternateChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return torch.cat([a.expand_as(x), x], dim=1) - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class AlternateChannel2D(nn.Module): - def __init__(self, layers): - super(AlternateChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1, -1, 1, 1) - return torch.cat([a.expand_as(x), x], dim=1) - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class SelectChannel(nn.Module): - def __init__(self, layers): - super(SelectChannel, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return a.sigmoid().expand_as(x) * x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class SelectChannel2D(nn.Module): - def __init__(self, layers): - super(SelectChannel2D, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]].view(1, -1, 1, 1) - return a.sigmoid().expand_as(x) * x - - -# weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 -class ScaleSpatial(nn.Module): - def __init__(self, layers): - super(ScaleSpatial, self).__init__() - self.layers = layers # layer indices - - def forward(self, x, outputs): - a = outputs[self.layers[0]] - return x * a - - -class ImplicitA(nn.Module): - def __init__(self, channel): - super(ImplicitA, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit - - -class ImplicitC(nn.Module): - def __init__(self, channel): - super(ImplicitC, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit - - -class ImplicitM(nn.Module): - def __init__(self, channel): - super(ImplicitM, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) - nn.init.normal_(self.implicit, mean=1., std=.02) - - def forward(self): - return self.implicit - - -class Implicit2DA(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DA, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, atom, 
channel, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit - - -class Implicit2DC(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DC, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.zeros(1, atom, channel, 1)) - nn.init.normal_(self.implicit, std=.02) - - def forward(self): - return self.implicit - - -class Implicit2DM(nn.Module): - def __init__(self, atom, channel): - super(Implicit2DM, self).__init__() - self.channel = channel - self.implicit = nn.Parameter(torch.ones(1, atom, channel, 1)) - nn.init.normal_(self.implicit, mean=1., std=.02) - - def forward(self): - return self.implicit diff --git a/model_image/yolor/utils/loss.py b/model_image/yolor/utils/loss.py deleted file mode 100644 index 284a22e..0000000 --- a/model_image/yolor/utils/loss.py +++ /dev/null @@ -1,186 +0,0 @@ -# Loss functions - -import torch -import torch.nn as nn - -from yolor.utils.general import bbox_iou -from yolor.utils.torch_utils import is_parallel - - -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets - return 1.0 - 0.5 * eps, 0.5 * eps - - -class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. - def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() - self.loss_fcn = nn.BCEWithLogitsLoss( - reduction='none') # must be nn.BCEWithLogitsLoss() - self.alpha = alpha - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - pred = torch.sigmoid(pred) # prob from logits - dx = pred - true # reduce only missing label effects - # dx = (pred - true).abs() # reduce missing label and false label effects - alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) - loss *= alpha_factor - return loss.mean() - - -class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) - def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() - self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() - self.gamma = gamma - self.alpha = alpha - self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element - - def forward(self, pred, true): - loss = self.loss_fcn(pred, true) - # p_t = torch.exp(-loss) - # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability - - # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py - pred_prob = torch.sigmoid(pred) # prob from logits - p_t = true * pred_prob + (1 - true) * (1 - pred_prob) - alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) - modulating_factor = (1.0 - p_t) ** self.gamma - loss *= alpha_factor * modulating_factor - - if self.reduction == 'mean': - return loss.mean() - elif self.reduction == 'sum': - return loss.sum() - else: # 'none' - return loss - - -def compute_loss(p, targets, model): # predictions, targets, model - device = targets.device - # print(device) - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros( - 1, device=device), torch.zeros(1, device=device) - tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets - h = model.hyp # hyperparameters - - # Define criteria - BCEcls = nn.BCEWithLogitsLoss( - pos_weight=torch.Tensor([h['cls_pw']])).to(device) - BCEobj = nn.BCEWithLogitsLoss( - pos_weight=torch.Tensor([h['obj_pw']])).to(device) - - # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - cp, cn = smooth_BCE(eps=0.0) - - # Focal loss - g = h['fl_gamma'] # focal loss gamma - if g > 0: - BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - - # Losses - nt = 0 # number of targets - no = len(p) # number of outputs - balance = [4.0, 1.0, 0.4] if no == 3 else [ - 4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6 - balance = [4.0, 1.0, 0.5, 0.4, 0.1] if no == 5 else balance - for i, pi in enumerate(p): # layer index, layer predictions - b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj - - n = b.shape[0] # number of targets - if n: - nt += n # cumulative targets - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets - - # Regression - pxy = ps[:, :2].sigmoid() * 2. - 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] - pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box - # iou(prediction, target) - iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) - lbox += (1.0 - iou).mean() # iou loss - - # Objectness - tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * \ - iou.detach().clamp(0).type(tobj.dtype) # iou ratio - - # Classification - if model.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], cn, device=device) # targets - t[range(n), tcls[i]] = cp - lcls += BCEcls(ps[:, 5:], t) # BCE - - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - - lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss - - s = 3 / no # output count scaling - lbox *= h['box'] * s - lobj *= h['obj'] * s * (1.4 if no >= 4 else 1.) 
- lcls *= h['cls'] * s - bs = tobj.shape[0] # batch size - - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() - - -def build_targets(p, targets, model): - nt = targets.shape[0] # number of anchors, targets - tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(6, device=targets.device) # normalized to gridspace gain - off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], - device=targets.device).float() # overlap offsets - - g = 0.5 # offset - multi_gpu = is_parallel(model) - for i, jj in enumerate(model.module.yolo_layers if multi_gpu else model.yolo_layers): - # get number of grid points and anchor vec for this yolo layer - anchors = model.module.module_list[jj].anchor_vec if multi_gpu else model.module_list[jj].anchor_vec - gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain - - # Match targets to anchors - a, t, offsets = [], targets * gain, 0 - if nt: - na = anchors.shape[0] # number of anchors - # anchor tensor, same as .repeat_interleave(nt) - at = torch.arange(na).view(na, 1).repeat(1, nt) - r = t[None, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max( - r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare - # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2)) - a, t = at[j], t.repeat(na, 1, 1)[j] # filter - - # overlaps - gxy = t[:, 2:4] # grid xy - z = torch.zeros_like(gxy) - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T - a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat( - (t, t[j], t[k], t[l], t[m]), 0) - offsets = torch.cat( - (z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g - - # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh - gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices - - # Append - # indices.append((b, a, gj, gi)) # image, anchor, grid indices - # image, anchor, grid indices - indices.append( - (b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) - tbox.append(torch.cat((gxy - gij, gwh), 1)) # box - anch.append(anchors[a]) # anchors - tcls.append(c) # class - - return tcls, tbox, indices, anch diff --git a/model_image/yolor/utils/metrics.py b/model_image/yolor/utils/metrics.py deleted file mode 100644 index fa25849..0000000 --- a/model_image/yolor/utils/metrics.py +++ /dev/null @@ -1,146 +0,0 @@ -# Model validation metrics - -import numpy as np - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def fitness_p(x): - # Model fitness as a weighted combination of metrics - w = [1.0, 0.0, 0.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def fitness_r(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 1.0, 0.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def fitness_ap50(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 1.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def fitness_ap(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.0, 1.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def fitness_f(x): - # Model fitness as a weighted combination of metrics - # w = [0.0, 0.0, 0.0, 1.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - 
return ((x[:, 0]*x[:, 1])/(x[:, 0]+x[:, 1])) - - -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-recall_curve.png'): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - fname: Plot filename - # Returns - The average precision as computed in py-faster-rcnn. - """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes = np.unique(target_cls) - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898 - # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95) - s = [unique_classes.shape[0], tp.shape[1]] - ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels - n_p = i.sum() # number of predictions - - if n_p == 0 or n_l == 0: - continue - else: - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + 1e-16) # recall curve - # r at pr_score, negative x, xp because xp decreases - r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-pr_score, -conf[i], - precision[:, 0]) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap( - recall[:, j], precision[:, j]) - if j == 0: - # precision at mAP@0.5 - py.append(np.interp(px, mrec, mpre)) - - # Compute F1 score (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) - - if plot: - py = np.stack(py, axis=1) - fig, ax = plt.subplots(1, 1, figsize=(5, 5)) - ax.plot(px, py, linewidth=0.5, color='grey') # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=2, color='blue', - label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend() - fig.tight_layout() - fig.savefig(fname, dpi=200) - - return p, r, ap, f1, unique_classes.astype('int32') - - -def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rbgirshick/py-faster-rcnn. - # Arguments - recall: The recall curve (list). - precision: The precision curve (list). - # Returns - The average precision as computed in py-faster-rcnn. 
- """ - - # Append sentinel values to beginning and end - mrec = np.concatenate(([0.0], recall, [1.0])) - mpre = np.concatenate(([1.0], precision, [0.0])) - - # Compute the precision envelope - mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - # points where x axis (recall) changes - i = np.where(mrec[1:] != mrec[:-1])[0] - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec diff --git a/model_image/yolor/utils/parse_config.py b/model_image/yolor/utils/parse_config.py deleted file mode 100644 index 4a03a8c..0000000 --- a/model_image/yolor/utils/parse_config.py +++ /dev/null @@ -1,74 +0,0 @@ -import os - -import numpy as np - - -def parse_model_cfg(path): - # Parse the yolo *.cfg file and return module definitions path may be 'cfg/yolov3.cfg', 'yolov3.cfg', or 'yolov3' - - with open(path, 'r') as f: - lines = f.read().split('\n') - lines = [x for x in lines if x and not x.startswith('#')] - lines = [x.rstrip().lstrip() - for x in lines] # get rid of fringe whitespaces - mdefs = [] # module definitions - for line in lines: - if line.startswith('['): # This marks the start of a new block - mdefs.append({}) - mdefs[-1]['type'] = line[1:-1].rstrip() - if mdefs[-1]['type'] == 'convolutional': - # pre-populate with zeros (may be overwritten later) - mdefs[-1]['batch_normalize'] = 0 - - else: - key, val = line.split("=") - key = key.rstrip() - - if key == 'anchors': # return nparray - # np anchors - mdefs[-1][key] = np.array([float(x) - for x in val.split(',')]).reshape((-1, 2)) - elif (key in ['from', 'layers', 'mask']) or (key == 'size' and ',' in val): # return array - mdefs[-1][key] = [int(x) for x in val.split(',')] - else: - val = val.strip() - if val.isnumeric(): # return int or float - mdefs[-1][key] = int(val) if (int(val) - - float(val)) == 0 else float(val) - else: - mdefs[-1][key] = val # return string - - # Check all fields are supported - supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups', - 'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random', - 'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind', - 'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'atoms', 'na', 'nc'] - - f = [] # fields - for x in mdefs[1:]: - [f.append(k) for k in x if k not in f] - u = [x for x in f if x not in supported] # unsupported fields - assert not any( - u), "Unsupported fields %s in %s. 
See https://github.com/ultralytics/yolov3/issues/631" % (u, path) - - return mdefs - - -def parse_data_cfg(path): - # Parses the data configuration file - # add data/ prefix if omitted - if not os.path.exists(path) and os.path.exists('data' + os.sep + path): - path = 'data' + os.sep + path - - with open(path, 'r') as f: - lines = f.readlines() - - options = dict() - for line in lines: - line = line.strip() - if line == '' or line.startswith('#'): - continue - key, val = line.split('=') - options[key.strip()] = val.strip() - - return options diff --git a/model_image/yolor/utils/plots.py b/model_image/yolor/utils/plots.py deleted file mode 100644 index 586824b..0000000 --- a/model_image/yolor/utils/plots.py +++ /dev/null @@ -1,404 +0,0 @@ -# Plotting utils - -import glob -import math -import os -import random -from copy import copy -from pathlib import Path - -import cv2 -import numpy as np -import torch -import yaml -from PIL import Image -from scipy.signal import butter, filtfilt - -from utils.general import xywh2xyxy, xyxy2xywh -from utils.metrics import fitness - -# Settings - - -def color_list(): - # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb - def hex2rgb(h): - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - - return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']] - - -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace( - x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - -def plot_one_box(x, img, color=None, label=None, line_thickness=None): - # Plots one bounding box on image img - tl = line_thickness or round( - 0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness - color = color or [random.randint(0, 255) for _ in range(3)] - c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) - if label: - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, - [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) - - -def plot_wh_methods(): # from utils.general import *; plot_wh_methods() - # Compares the two methods for width-height anchor multiplication - # https://github.com/ultralytics/yolov3/issues/168 - x = np.arange(-4.0, 4.0, .1) - ya = np.exp(x) - yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 - - fig = plt.figure(figsize=(6, 3), dpi=150) - plt.plot(x, ya, '.-', label='YOLO') - plt.plot(x, yb ** 2, '.-', label='YOLO ^2') - plt.plot(x, yb ** 1.6, '.-', label='YOLO ^1.6') - plt.xlim(left=-4, right=4) - plt.ylim(bottom=0, top=6) - plt.xlabel('input') - 
plt.ylabel('output') - plt.grid() - plt.legend() - fig.tight_layout() - fig.savefig('comparison.png', dpi=200) - - -def output_to_target(output, width, height): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] - if isinstance(output, torch.Tensor): - output = output.cpu().numpy() - - targets = [] - for i, o in enumerate(output): - if o is not None: - for pred in o: - box = pred[:4] - w = (box[2] - box[0]) / width - h = (box[3] - box[1]) / height - x = box[0] / width + w / 2 - y = box[1] / height + h / 2 - conf = pred[4] - cls = int(pred[5]) - - targets.append([i, cls, x, y, w, h, conf]) - - return np.array(targets) - - -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): - # Plot image grid with labels - - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - - # un-normalise - if np.max(images[0]) <= 1: - images *= 255 - - tl = 3 # line thickness - tf = max(tl - 1, 1) # font thickness - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - - # Check if we should resize - scale_factor = max_size / max(h, w) - if scale_factor < 1: - h = math.ceil(scale_factor * h) - w = math.ceil(scale_factor * w) - - colors = color_list() # list of colors - mosaic = np.full((int(ns * h), int(ns * w), 3), - 255, dtype=np.uint8) # init - for i, img in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - - block_x = int(w * (i // ns)) - block_y = int(h * (i % ns)) - - img = img.transpose(1, 2, 0) - if scale_factor < 1: - img = cv2.resize(img, (w, h)) - - mosaic[block_y:block_y + h, block_x:block_x + w, :] = img - if len(targets) > 0: - image_targets = targets[targets[:, 0] == i] - boxes = xywh2xyxy(image_targets[:, 2:6]).T - classes = image_targets[:, 1].astype('int') - labels = image_targets.shape[1] == 6 # labels if no conf column - # check for confidence presence (label vs pred) - conf = None if labels else image_targets[:, 6] - - boxes[[0, 2]] *= w - boxes[[0, 2]] += block_x - boxes[[1, 3]] *= h - boxes[[1, 3]] += block_y - for j, box in enumerate(boxes.T): - cls = int(classes[j]) - color = colors[cls % len(colors)] - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = '%s' % cls if labels else '%s %.1f' % ( - cls, conf[j]) - plot_one_box(box, mosaic, label=label, - color=color, line_thickness=tl) - - # Draw image filename labels - if paths: - label = Path(paths[i]).name[:40] # trim to 40 char - t_size = cv2.getTextSize( - label, 0, fontScale=tl / 3, thickness=tf)[0] - cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, - lineType=cv2.LINE_AA) - - # Image border - cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, - block_y + h), (255, 255, 255), thickness=3) - - if fname: - r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size - mosaic = cv2.resize( - mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) - # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save - Image.fromarray(mosaic).save(fname) # PIL save - return mosaic - - -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): - # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy( - scheduler) # do not modify originals - y = [] - for _ in range(epochs): - scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') - plt.grid() - plt.xlim(0, epochs) - plt.ylim(0) - plt.tight_layout() - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) - - -def plot_test_txt(): # from utils.general import *; plot_test() - # Plot test.txt histograms - x = np.loadtxt('test.txt', dtype=np.float32) - box = xyxy2xywh(x[:, :4]) - cx, cy = box[:, 0], box[:, 1] - - fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) - ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) - - fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) - ax[0].hist(cx, bins=600) - ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) - - -def plot_targets_txt(): # from utils.general import *; plot_targets_txt() - # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % - (x[i].mean(), x[i].std())) - ax[i].legend() - ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) - - -# from utils.general import *; plot_study_txt() -def plot_study_txt(f='study.txt', x=None): - # Plot study.txt generated by test.py - fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - ax = ax.ravel() - - fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - for f in ['study/study_coco_yolo%s.txt' % x for x in ['s', 'm', 'l', 'x']]: - y = np.loadtxt(f, dtype=np.float32, usecols=[ - 0, 1, 2, 3, 7, 8, 9], ndmin=2).T - x = np.arange(y.shape[1]) if x is None else np.array(x) - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', - 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) - - j = y[3].argmax() + 1 - ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, - label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') - - ax2.grid() - ax2.set_xlim(0, 30) - ax2.set_ylim(28, 50) - ax2.set_yticks(np.arange(30, 55, 5)) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - plt.savefig('study_mAP_latency.png', dpi=300) - plt.savefig(f.replace('.txt', '.png'), dpi=300) - - -def plot_labels(labels, save_dir=''): - # plot dataset labels - c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes - nc = int(c.max() + 1) # number of classes - - fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) - ax = ax.ravel() - ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - ax[0].set_xlabel('classes') - 
ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet') - ax[1].set_xlabel('x') - ax[1].set_ylabel('y') - ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet') - ax[2].set_xlabel('width') - ax[2].set_ylabel('height') - plt.savefig(Path(save_dir) / 'labels.png', dpi=200) - plt.close() - - # seaborn correlogram - try: - import seaborn as sns - import pandas as pd - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) - sns.pairplot(x, corner=True, diag_kind='hist', kind='scatter', markers='o', - plot_kws=dict(s=3, edgecolor=None, - linewidth=1, alpha=0.02), - diag_kws=dict(bins=50)) - plt.savefig(Path(save_dir) / 'labels_correlogram.png', dpi=200) - plt.close() - except Exception as e: - pass - - -# from utils.general import *; plot_evolution() -def plot_evolution(yaml_file='data/hyp.finetune.yaml'): - # Plot hyperparameter evolution results in evolve.txt - with open(yaml_file) as f: - hyp = yaml.load(f, Loader=yaml.FullLoader) - x = np.loadtxt('evolve.txt', ndmin=2) - f = fitness(x) - # weights = (f - f.min()) ** 2 # for weighted results - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - for i, (k, v) in enumerate(hyp.items()): - y = x[:, i + 7] - # mu = (y * weights).sum() / weights.sum() # best weighted result - mu = y[f.argmax()] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', - alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={ - 'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - plt.savefig('evolve.png', dpi=200) - print('\nPlot saved as evolve.png') - - -# from utils.general import *; plot_results_overlay() -def plot_results_overlay(start=0, stop=0): - # Plot training 'results*.txt', overlaying train and val losses - s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', - 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends - t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles - for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): - results = np.loadtxt( - f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) - ax = ax.ravel() - for i in range(5): - for j in [i, i + 5]: - y = results[j, x] - ax[i].plot(x, y, marker='.', label=s[j]) - # y_smooth = butter_lowpass_filtfilt(y) - # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) - - ax[i].set_title(t[i]) - ax[i].legend() - ax[i].set_ylabel(f) if i == 0 else None # add filename - fig.savefig(f.replace('.txt', '.png'), dpi=200) - - -def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): - # from utils.general import *; plot_results(save_dir='runs/train/exp0') - # Plot training 'results*.txt' - fig, ax = plt.subplots(2, 5, figsize=(12, 6)) - ax = ax.ravel() - s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', - 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] - if bucket: - # os.system('rm -rf storage.googleapis.com') - # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] - files = ['%g.txt' % x for x in id] - c = ('gsutil cp ' + '%s ' * len(files) + - '.') % tuple('gs://%s/%g.txt' % (bucket, x) for x in id) - os.system(c) - else: - files = 
glob.glob(str(Path(save_dir) / '*.txt')) + \ - glob.glob('../../Downloads/results*.txt') - assert len( - files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir) - for fi, f in enumerate(files): - try: - results = np.loadtxt( - f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - for i in range(10): - y = results[i, x] - if i in [0, 1, 2, 5, 6, 7]: - y[y == 0] = np.nan # don't show zero loss values - # y /= y[0] # normalize - label = labels[fi] if len(labels) else Path(f).stem - ax[i].plot(x, y, marker='.', label=label, - linewidth=1, markersize=6) - ax[i].set_title(s[i]) - # if i in [5, 6, 7]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - - fig.tight_layout() - ax[1].legend() - fig.savefig(Path(save_dir) / 'results.png', dpi=200) diff --git a/model_image/yolor/utils/torch_utils.py b/model_image/yolor/utils/torch_utils.py deleted file mode 100644 index 4d07baa..0000000 --- a/model_image/yolor/utils/torch_utils.py +++ /dev/null @@ -1,240 +0,0 @@ -# PyTorch utils - -import logging -import math -import os -import time -from contextlib import contextmanager -from copy import deepcopy - -import torch -import torch.backends.cudnn as cudnn -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -logger = logging.getLogger(__name__) - - -@contextmanager -def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ - if local_rank not in [-1, 0]: - torch.distributed.barrier() - yield - if local_rank == 0: - torch.distributed.barrier() - - -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.deterministic = True - cudnn.benchmark = False - else: # faster, less reproducible - cudnn.deterministic = False - cudnn.benchmark = True - - -def select_device(device='', batch_size=None): - # device = 'cpu' or '0' or '0,1,2,3' - cpu_request = device.lower() == 'cpu' - if device and not cpu_request: # if device requested other than 'cpu' - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity - - cuda = False if cpu_request else torch.cuda.is_available() - if cuda: - c = 1024 ** 2 # bytes to MB - ng = torch.cuda.device_count() - if ng > 1 and batch_size: # check that batch_size is compatible with device_count - assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng) - x = [torch.cuda.get_device_properties(i) for i in range(ng)] - s = f'Using torch {torch.__version__} ' - for i in range(0, ng): - if i == 1: - s = ' ' * len(s) - logger.info("%sCUDA:%g (%s, %dMB)" % (s, i, x[i].name, x[i].total_memory / c)) - else: - logger.info(f'Using torch {torch.__version__} CPU') - - logger.info('') # skip a line - return torch.device('cuda:0' if cuda else 'cpu') - - -def time_synchronized(): - torch.cuda.synchronize() if torch.cuda.is_available() else None - return time.time() - - -def is_parallel(model): - return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) - - -def intersect_dicts(da, db, exclude=()): - # 
Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - -def initialize_weights(model): - for m in model.modules(): - t = type(m) - if t is nn.Conv2d: - pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif t is nn.BatchNorm2d: - m.eps = 1e-3 - m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: - m.inplace = True - - -def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' - return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] - - -def sparsity(model): - # Return global model sparsity - a, b = 0., 0. - for p in model.parameters(): - a += p.numel() - b += (p == 0).sum() - return b / a - - -def prune(model, amount=0.3): - # Prune model to requested global sparsity - import torch.nn.utils.prune as prune - print('Pruning model... ', end='') - for name, m in model.named_modules(): - if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) - - -def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) - - # prepare filters - w_conv = conv.weight.clone().view(conv.out_channels, -1) - w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) - - # prepare spatial bias - b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias - b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) - fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) - - return fusedconv - - -def model_info(model, verbose=False, img_size=640): - # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] - n_p = sum(x.numel() for x in model.parameters()) # number parameters - n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients - if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) - for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - - try: # FLOPS - from thop import profile - flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, img_size, img_size),), verbose=False)[0] / 1E9 * 2 - img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.9f GFLOPS' % (flops) # 640x640 FLOPS - except (ImportError, Exception): - fs = '' - - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - -def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio - # scales img(bs,3,y,x) by ratio - if ratio == 1.0: - return img - else: - h, w = img.shape[2:] - s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize - if not same_shape: # pad/crop img - gs = 32 # (pixels) grid size - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] - return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean - - -def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] - for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: - continue - else: - setattr(a, k, v) - - -class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. 
- """ - - def __init__(self, model, decay=0.9999, updates=0): - # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA - self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): - p.requires_grad_(False) - - def update(self, model): - # Update EMA parameters - with torch.no_grad(): - self.updates += 1 - d = self.decay(self.updates) - - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict - for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: - v *= d - v += (1. - d) * msd[k].detach() - - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): - # Update EMA attributes - copy_attr(self.ema, model, include, exclude) From 2df0f91906aefcdcb93dc1efb74c54543648b4ab Mon Sep 17 00:00:00 2001 From: eugenos-programos Date: Mon, 11 Sep 2023 13:08:52 +0300 Subject: [PATCH 2/4] feat: change bottle and human model on yolo8 --- connection/ModelPredictionsReceiver.py | 1 - model_image/ObjectDetectionModel.py | 7 ++++--- model_image/app.py | 9 ++++----- model_image/requirements.txt | 3 ++- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/connection/ModelPredictionsReceiver.py b/connection/ModelPredictionsReceiver.py index 9c48203..61c64ae 100644 --- a/connection/ModelPredictionsReceiver.py +++ b/connection/ModelPredictionsReceiver.py @@ -11,7 +11,6 @@ def __init__(self, server_url: str, logger: Logger) -> None: self._server_url = server_url self._logger = logger self._port = configs["port"] - print("server_url - ", self._server_url) @staticmethod def _convert_image2bytes(image: np.array, format='PNG') -> io.BytesIO: diff --git a/model_image/ObjectDetectionModel.py b/model_image/ObjectDetectionModel.py index 7f5ccd2..e740beb 100644 --- a/model_image/ObjectDetectionModel.py +++ b/model_image/ObjectDetectionModel.py @@ -4,15 +4,16 @@ class YOLOv8ObjDetectionModel: - def __init__(self, path: str, conf_thresh: float, iou_thresh: float, classes: list) -> None: - self.model = YOLO(path) + def __init__(self, model_path: str, conf_thresh: float, iou_thresh: float, classes: list, img_size: int) -> None: + self._model = YOLO(model_path) self.conf_thresh = conf_thresh self.iou_thresh = iou_thresh self.classes = classes + self.img_size = img_size @torch.no_grad() def __call__(self, img: np.array, classes: list = None) -> list: - results = self.model( + results = self._model( source=img, conf=self.conf_thresh, iou=self.iou_thresh, diff --git a/model_image/app.py b/model_image/app.py index f808f61..23a4e86 100644 --- a/model_image/app.py +++ b/model_image/app.py @@ -1,8 +1,7 @@ from PIL import Image from flask import Flask, jsonify, request -from flask_configs.load_configs import * +from flask_configs.load_configs import bottle_model_configs, box_model_configs, human_model_configs from ObjectDetectionModel import YOLOv8ObjDetectionModel -from YOLORObjectDetectionModel import YOLORObjectDetectionModel import numpy as np import colorlog import logging @@ -24,9 +23,9 @@ logger.propagate = False app = Flask(__name__) -human_model = YOLORObjectDetectionModel(HUMAN_MODEL_PATH, CONF_PATH, CONF_THRES, IOU_THRES, CLASSES, logger=logger) -box_model = YOLOv8ObjDetectionModel(BOX_MODEL_PATH, CONF_THRES, IOU_THRES, CLASSES) -bottle_model = 
YOLORObjectDetectionModel(HUMAN_MODEL_PATH, CONF_PATH, 0.2, IOU_THRES, [39], logger=logger) +human_model = YOLOv8ObjDetectionModel(**human_model_configs) +box_model = YOLOv8ObjDetectionModel(**box_model_configs) +bottle_model = YOLOv8ObjDetectionModel(**bottle_model_configs) convert_bytes2image = lambda bytes: np.array(Image.open(io.BytesIO(bytes)), dtype=np.uint8) diff --git a/model_image/requirements.txt b/model_image/requirements.txt index fb2803b..5ff082a 100644 --- a/model_image/requirements.txt +++ b/model_image/requirements.txt @@ -3,4 +3,5 @@ httplib2==0.22.0 numpy==1.22.3 opencv_python==4.7.0.72 ultralytics==8.0.136 -Flask==2.2.2 \ No newline at end of file +Flask==2.2.2 +py-cpuinfo==9.0.0 From f93b694b68a3d3991f089d6bf1563fd59d91164a Mon Sep 17 00:00:00 2001 From: eugenos-programos Date: Mon, 11 Sep 2023 13:09:30 +0300 Subject: [PATCH 3/4] refactor: change configurations structure --- model_image/flask_configs/flask_confs.json | 36 ++++++++++++++++------ model_image/flask_configs/load_configs.py | 11 ++----- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/model_image/flask_configs/flask_confs.json b/model_image/flask_configs/flask_confs.json index c4ebb75..59c2e47 100644 --- a/model_image/flask_configs/flask_confs.json +++ b/model_image/flask_configs/flask_confs.json @@ -1,12 +1,30 @@ { - "classes": [ - 0 - ], - "iou_thres": 0.5, - "conf_thres": 0.5, - "config_path": "weigths/yolor_csp_x.cfg", - "box_detect_model": "weigths/min_max_v0.3.10.pt", - "human_detect_model": "weigths/min_max_v0.4.4b.pt", - "img_size": 640, + "box_model": { + "classes": [ + 0 + ], + "iou_thresh": 0.5, + "conf_thresh": 0.5, + "model_path": "weigths/min_max_v0.3.10_box.pt", + "img_size": 640 + }, + "human_model": { + "classes": [ + 0 + ], + "iou_thresh": 0.5, + "conf_thresh": 0.5, + "model_path": "weigths/min_max_v0.5.2_bottle.pt", + "img_size": 640 + }, + "bottle_model": { + "classes": [ + 0 + ], + "iou_thresh": 0.5, + "conf_thresh": 0.6, + "model_path": "weigths/min_max_v0.5.2_human.pt", + "img_size": 640 + }, "port": 5000 } \ No newline at end of file diff --git a/model_image/flask_configs/load_configs.py b/model_image/flask_configs/load_configs.py index bcf3de6..eef541b 100644 --- a/model_image/flask_configs/load_configs.py +++ b/model_image/flask_configs/load_configs.py @@ -3,11 +3,6 @@ with open("flask_configs/flask_confs.json", "r") as conf: configs = json.load(conf) - CONF_THRES = configs.get("conf_thres") - IOU_THRES = configs.get("iou_thres") - BOX_MODEL_PATH = configs.get("box_detect_model") - HUMAN_MODEL_PATH = configs.get("human_detect_model") - CONF_PATH = configs.get("config_path") - CLASSES = configs.get("classes") - IMG_SIZE = configs.get("img_size") - PORT = configs.get("port") + box_model_configs = configs["box_model"] + human_model_configs = configs["human_model"] + bottle_model_configs = configs["bottle_model"] From 3c4cd534cb3fe714cd9e7a232f1f6be070a1fac2 Mon Sep 17 00:00:00 2001 From: eugenos-programos Date: Mon, 11 Sep 2023 13:10:29 +0300 Subject: [PATCH 4/4] feat(requirements.txt): remove Pillow library --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b9b0f28..6931318 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,6 @@ colorlog==4.8.0 httplib2==0.22.0 numpy==1.22.3 opencv_python==4.7.0.72 -Pillow==9.5.0 python-dotenv==1.0.0 requests==2.27.1 numba==0.57.1 \ No newline at end of file