# pytorch-yolov3 / detect.py
# (Gemfury package-page boilerplate removed and this header commented out so
# the file parses as Python.)
#! /usr/bin/env python3

from __future__ import division
import random
from pytorch_yolov3.models import *
from pytorch_yolov3.utils.utils import *
#from pytorch_yolov3.utils.datasets import *
#from pytorch_yolov3.utils.augmentations import *
from pytorch_yolov3.utils.transforms import *

import os
import argparse
import tqdm
import numpy as np

from PIL import Image

import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
#from torch.utils.data import DataLoader
#from torchvision import datasets
from torch.autograd import Variable

import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator


# def detect_directory(model_path, weights_path, img_path, classes, output_path,
#     batch_size=8, img_size=416, n_cpu=8, conf_thres=0.5, nms_thres=0.5,draw=True):
#     """Detects objects on all images in specified directory and saves output images with drawn detections.
#
#     :param model_path: Path to model definition file (.cfg)
#     :type model_path: str
#     :param weights_path: Path to weights or checkpoint file (.weights or .pth)
#     :type weights_path: str
#     :param img_path: Path to directory with images to inference
#     :type img_path: str
#     :param classes: List of class names
#     :type classes: [str]
#     :param output_path: Path to output directory
#     :type output_path: str
#     :param batch_size: Size of each image batch, defaults to 8
#     :type batch_size: int, optional
#     :param img_size: Size of each image dimension for yolo, defaults to 416
#     :type img_size: int, optional
#     :param n_cpu: Number of cpu threads to use during batch generation, defaults to 8
#     :type n_cpu: int, optional
#     :param conf_thres: Object confidence threshold, defaults to 0.5
#     :type conf_thres: float, optional
#     :param nms_thres: IOU threshold for non-maximum suppression, defaults to 0.5
#     :type nms_thres: float, optional
#     """
#     dataloader = _create_data_loader(img_path, batch_size, img_size, n_cpu)
#     model = load_model(model_path, weights_path)
#
#     img_detections, imgs = detect(
#         model,
#         dataloader,
#         output_path,
#         img_size,
#         conf_thres,
#         nms_thres)
#     if draw:
#         _draw_and_save_output_images(img_detections, imgs, img_size, output_path,classes)
#     return img_detections, imgs


def detect_image(model, image,
    img_size=416, conf_thres=0.5, nms_thres=0.5):
    """Run inference on a single image and return its detections.

    :param model: Model for inference
    :type model: models.Darknet
    :param image: Image to inference
    :type image: nd.array
    :param img_size: Size of each image dimension for yolo, defaults to 416
    :type img_size: int, optional
    :param conf_thres: Object confidence threshold, defaults to 0.5
    :type conf_thres: float, optional
    :param nms_thres: IOU threshold for non-maximum suppression, defaults to 0.5
    :type nms_thres: float, optional
    :return: Detections on image
    :rtype: nd.array
    """
    model.eval()  # inference mode (disables dropout / batch-norm updates)

    # Apply the default transform pipeline plus a resize. The transforms
    # operate on (image, targets) pairs, so a dummy (1, 5) zero array stands
    # in for box targets; only the transformed image is kept.
    pipeline = transforms.Compose([DEFAULT_TRANSFORMS, Resize(img_size)])
    tensor_img, _ = pipeline((image, np.zeros((1, 5))))
    tensor_img = tensor_img.unsqueeze(0)  # add batch dimension

    if torch.cuda.is_available():
        tensor_img = tensor_img.to("cuda")

    # Forward pass and NMS without building a computation graph
    with torch.no_grad():
        raw_output = model(tensor_img)
        suppressed = non_max_suppression(raw_output, conf_thres, nms_thres)
        boxes = rescale_boxes(suppressed[0], img_size, image.shape[:2])
    return to_cpu(boxes).numpy()

# def detect(model, dataloader, output_path,
#     img_size, conf_thres, nms_thres):
#     """Inferences images with model.
#
#     :param model: Model for inference
#     :type model: models.Darknet
#     :param dataloader: Dataloader provides the batches of images to inference
#     :type dataloader: DataLoader
#     :param output_path: Path to output directory
#     :type output_path: str
#     :param img_size: Size of each image dimension for yolo, defaults to 416
#     :type img_size: int, optional
#     :param conf_thres: Object confidence threshold, defaults to 0.5
#     :type conf_thres: float, optional
#     :param nms_thres: IOU threshold for non-maximum suppression, defaults to 0.5
#     :type nms_thres: float, optional
#     :return: List of detections. The coordinates are given for the padded image that is provided by the dataloader.
#         Use `utils.rescale_boxes` to transform them into the desired input image coordinate system before its transformed by the dataloader),
#         List of input image paths
#     :rtype: [Tensor], [str]
#     """
#     # Create output directory, if missing
#     os.makedirs(output_path, exist_ok=True)
#
#     model.eval()  # Set model to evaluation mode
#
#     Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
#
#     img_detections = []  # Stores detections for each image index
#     imgs = []  # Stores image paths
#
#     for (img_paths, input_imgs) in tqdm.tqdm(dataloader, desc="Detecting"):
#         # Configure input
#         input_imgs = Variable(input_imgs.type(Tensor))
#
#         # Get detections
#         with torch.no_grad():
#             detections = model(input_imgs)
#             detections = non_max_suppression(detections, conf_thres, nms_thres)
#
#         # Store image and detections
#         img_detections.extend(detections)
#         imgs.extend(img_paths)
#     return img_detections, imgs

def _draw_and_save_output_images(img_detections, imgs, img_size, output_path,classes):
    """Draw detections onto each image and store the annotated results.

    :param img_detections: List of detections
    :type img_detections: [Tensor]
    :param imgs: List of paths to image files
    :type imgs: [str]
    :param img_size: Size of each image dimension for yolo
    :type img_size: int
    :param output_path: Path of output directory
    :type output_path: str
    :param classes: List of class names used to label detections
    :type classes: [str]
    """
    # TODO: Draw class names...
    # Sample 20 evenly spaced bounding-box colors from the tab20b colormap
    palette = plt.get_cmap("tab20b")
    box_colors = [palette(value) for value in np.linspace(0, 1, 20)]

    # Render and save one annotated image per (path, detections) pair
    pairs = zip(imgs, img_detections)
    for image_path, detections in tqdm.tqdm(pairs, desc="Saving output images"):
        _draw_and_save_output_image(image_path, detections, img_size, box_colors, output_path, classes)

def _draw_and_save_output_image(image_path, detections, img_size, colors, output_path,classes):
    """Draws detections in output image and stores this.

    :param image_path: Path to input image, or the image itself as an array
    :type image_path: str or nd.array
    :param detections: Detections on the image, one row per box
        (x1, y1, x2, y2, conf, cls_pred)
    :type detections: Tensor or nd.array
    :param img_size: Size of each image dimension for yolo
    :type img_size: int
    :param colors: List of colors used to draw detections
    :type colors: []
    :param output_path: Path of output directory
    :type output_path: str
    :param classes: List of class names indexed by class id
    :type classes: [str]
    :return: Path of the saved output image
    :rtype: str
    """
    # Create plot. Load from disk when given a path, otherwise assume the
    # argument is already an image array. (Was `type(image_path) == 'str'`,
    # which compares a type object to a string literal and is always False.)
    if isinstance(image_path, str):
        img = np.array(Image.open(image_path))
    else:
        img = image_path

    plt.figure()
    fig, ax = plt.subplots(1)
    ax.imshow(img)

    # `torch.tensor` is a factory function, not a class, so the original
    # `type(detections) == torch.tensor` check never matched. Tensor
    # detections still carry padded-image coordinates and need rescaling;
    # numpy detections (as produced by detect_image) are already rescaled.
    if isinstance(detections, torch.Tensor):
        # Rescale boxes to original image
        detections = rescale_boxes(detections, img_size, img.shape[:2])

        unique_labels = detections[:, -1].cpu().unique()
    else:
        unique_labels = np.unique(detections[:, -1])

    # Pick one distinct color per predicted class
    n_cls_preds = len(unique_labels)
    bbox_colors = random.sample(colors, n_cls_preds)
    for x1, y1, x2, y2, conf, cls_pred in detections:

        print(f"\t+ Label: {classes[int(cls_pred)]} | Confidence: {conf.item():0.4f}")

        box_w = x2 - x1
        box_h = y2 - y1

        color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
        # Create a Rectangle patch
        bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
        # Add the bbox to the plot
        ax.add_patch(bbox)
        # Add label
        plt.text(
            x1,
            y1,
            s=classes[int(cls_pred)],
            color="white",
            verticalalignment="top",
            bbox={"color": color, "pad": 0})

    # Save generated image with detections (no axes or tick marks)
    plt.axis("off")
    plt.gca().xaxis.set_major_locator(NullLocator())
    plt.gca().yaxis.set_major_locator(NullLocator())
    # Derive the output filename from the input path when available; fall
    # back to a fixed name for in-memory images. (The original built the
    # path as f"(unknown).png", leaving the computed `filename` unused.)
    if isinstance(image_path, str):
        filename = os.path.basename(image_path).split(".")[0]
    else:
        filename = 'result'
    output_path = os.path.join(output_path, f"{filename}.png")
    plt.savefig(output_path, bbox_inches="tight", pad_inches=0.0)
    plt.close()
    return output_path

# def _create_data_loader(img_path, batch_size, img_size, n_cpu):
#     """Creates a DataLoader for inferencing.
#
#     :param img_path: Path to file containing all paths to validation images.
#     :type img_path: str
#     :param batch_size: Size of each image batch
#     :type batch_size: int
#     :param img_size: Size of each image dimension for yolo
#     :type img_size: int
#     :param n_cpu: Number of cpu threads to use during batch generation
#     :type n_cpu: int
#     :return: Returns DataLoader
#     :rtype: DataLoader
#     """
#     dataset = ImageFolder(
#         img_path,
#         transform=transforms.Compose([DEFAULT_TRANSFORMS, Resize(img_size)]))
#     dataloader = DataLoader(
#         dataset,
#         batch_size=batch_size,
#         shuffle=False,
#         num_workers=n_cpu,
#         pin_memory=True)
#     return dataloader

def make_prediction(image):
    """Run the configured YOLOv3 model on one image and save/return the result.

    :param image: Image to run inference on (array; a string path is also
        accepted for the resize branch below, but detect_image expects an
        array — TODO confirm intended input type)
    :type image: nd.array or str
    :return: Tuple of (annotated output image, input image)
    :rtype: (PIL.Image.Image, PIL.Image.Image or nd.array)
    """
    from pytorch_yolov3.config import model_config as config
    import PIL
    from pytorch_yolov3.utils import utils

    # Pull all inference settings from the packaged config module
    model = config.model
    weights = config.weights
    images = image
    pil_images = images
    output = config.output
    img_size = config.img_size
    conf_thres = config.conf_thres
    nms_thres = config.nms_thres
    classes = utils.load_classes(config.classes)  # List of class names

    model_y = load_model(model_path=model, weights_path=weights)
    # Pass the configured size/thresholds explicitly; previously they were
    # read from config but silently ignored (detect_image ran on defaults).
    img_detections = detect_image(
        model_y, pil_images,
        img_size=img_size, conf_thres=conf_thres, nms_thres=nms_thres)
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]
    output_path = _draw_and_save_output_image(images, img_detections, img_size, colors, output, classes)

    # `type(images) == 'str'` compared a type object to a string literal and
    # was always False; use isinstance so path inputs are actually opened.
    if isinstance(images, str):
        input_image = PIL.Image.open(images).resize([img_size, img_size])
    else:
        input_image = image
    assert img_detections is not None
    return PIL.Image.open(output_path), input_image

if __name__ == "__main__":
    # Original argparse-driven, whole-directory entry point, kept for
    # reference (detect_directory and its helpers are commented out above).
    # parser = argparse.ArgumentParser(description="Detect objects on images.")
    # parser.add_argument("-m", "--model", type=str, default="config/yolov3.cfg", help="Path to model definition file (.cfg)")
    # parser.add_argument("-w", "--weights", type=str, default="weights/yolov3.weights", help="Path to weights or checkpoint file (.weights or .pth)")
    # parser.add_argument("-i", "--images", type=str, default="data/samples", help="Path to directory with images to inference")
    # parser.add_argument("-c", "--classes", type=str, default="data/coco.names", help="Path to classes label file (.names)")
    # parser.add_argument("-o", "--output", type=str, default="output", help="Path to output directory")
    # parser.add_argument("-b", "--batch_size", type=int, default=1, help="Size of each image batch")
    # parser.add_argument("--img_size", type=int, default=416, help="Size of each image dimension for yolo")
    # parser.add_argument("--n_cpu", type=int, default=8, help="Number of cpu threads to use during batch generation")
    # parser.add_argument("--conf_thres", type=float, default=0.5, help="Object confidence threshold")
    # parser.add_argument("--nms_thres", type=float, default=0.4, help="IOU threshold for non-maximum suppression")
    # args = parser.parse_args()
    # print(args)
    #
    # # Extract class names from file
    # classes = load_classes(args.classes)  # List of class names
    #
    # detect_directory(
    #     args.model,
    #     args.weights,
    #     args.images,
    #     classes,
    #     args.output,
    #     batch_size=args.batch_size,
    #     img_size=args.img_size,
    #     n_cpu=args.n_cpu,
    #     conf_thres=args.conf_thres,
    #     nms_thres=args.nms_thres)

    # Demo entry point: run the configured model on one hard-coded sample
    # image and display the annotated result.
    # NOTE(review): these imports use top-level module names ("config",
    # "utils"), unlike make_prediction's package-relative
    # "pytorch_yolov3.*" imports — confirm which layout is intended.
    from config import model_config as config
    import PIL
    from utils import parse_config, utils
    # Read every inference setting from the config module
    model = config.model
    weights = config.weights
    images = 'data/samples/dog.jpg'
    # Load the sample image eagerly as a numpy array for detect_image
    pil_images = np.asarray(PIL.Image.open(images))
    classes = config.classes
    output = config.output
    batch_size = config.batch_size
    img_size = config.img_size
    n_cpu = config.n_cpu
    conf_thres = config.conf_thres
    nms_thres = config.nms_thres
    # Replaces the path stored above with the parsed name list
    classes = utils.load_classes(classes)  # List of class names

    model_y = load_model(model_path=model, weights_path=weights)
    # NOTE(review): conf_thres/nms_thres/img_size are read from config above
    # but not passed here, so detect_image runs with its defaults — confirm.
    img_detections = detect_image(model_y, pil_images)
    # 20 evenly spaced colors from the tab20b colormap for bounding boxes
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]
    output_path = _draw_and_save_output_image(images,img_detections,img_size,colors ,output,classes)
    PIL.Image.open(output_path).show()
    # NOTE(review): this assert runs after img_detections was already used;
    # it can never catch a None earlier in the flow.
    assert img_detections is not None