Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
97 changes: 97 additions & 0 deletions dotadevkit.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# please change CocoConvert.py to dotadevkit.py in your virtual environment

# --------------------------------------------------------
# Modified by Ashwin Nair
# Written by Jian Ding for DOTA_Devkit
# --------------------------------------------------------

# common path: /anaconda3/envs/obbdetection/lib/python3.x/site-packages/dotadevkit/ops/CocoConvert.py

import cv2
import json

from dotadevkit.misc.dota_utils import dota_classes, parse_dota_poly2
from pathlib import Path


def DOTA2COCO(srcpath, destfile, version="1.0"):
    """Convert a DOTA-format dataset into a COCO-style JSON annotation file.

    Parameters
    ----------
    srcpath : pathlib.Path
        Dataset root containing ``images/`` (PNG files) and ``labelTxt/``.
    destfile : path-like
        Output JSON annotation file.
    version : str
        DOTA version: "1.0", "1.5" or "2.0"; later versions add classes.

    Note: unlike standard COCO, each annotation's ``bbox`` holds the eight
    polygon corner coordinates (x1, y1, ..., x4, y4) of the oriented box,
    not the axis-aligned (x, y, w, h).
    """
    imageparent = srcpath / "images"
    labelparent = srcpath / "labelTxt"
    assert version in ["1.0", "1.5", "2.0"]

    # FIX: guard against duplicates so calling this function more than once
    # does not keep growing the module-level ``dota_classes`` list (it was
    # previously mutated unconditionally on every call).
    if version == "1.5":
        extra_classes = ["container-crane"]
    elif version == "2.0":
        extra_classes = ["container-crane", "airport", "helipad"]
    else:
        extra_classes = []
    for cls_name in extra_classes:
        if cls_name not in dota_classes:
            dota_classes.append(cls_name)

    data_dict = {}
    info = {
        "contributor": "Captain Group, Wuhan University",
        "data_created": "2018",
        "description": f"DOTA dataset version {version}",
        "url": "https://captain-whu.github.io/DOTA/dataset.html",
        "version": version,
        "year": 2018,
    }
    data_dict["info"] = info
    data_dict["images"] = []
    data_dict["categories"] = []
    data_dict["annotations"] = []

    # COCO category ids are 1-based.
    for idex, name in enumerate(dota_classes):
        single_cat = {"id": idex + 1, "name": name, "supercategory": name}
        data_dict["categories"].append(single_cat)

    inst_count = 1
    image_id = 1
    with open(destfile, "w") as f_out:
        for file in labelparent.iterdir():
            basename = file.stem

            imagepath = imageparent / (basename + ".png")
            img = cv2.imread(str(imagepath))
            if img is None:
                # FIX: skip unreadable images entirely; the original fell
                # through and reused stale/undefined width/height values.
                print("can not load!")
                continue
            height, width, c = img.shape

            single_image = {}
            single_image["file_name"] = basename + ".png"
            single_image["id"] = image_id
            single_image["width"] = width
            single_image["height"] = height
            data_dict["images"].append(single_image)

            # annotations
            objects = parse_dota_poly2(file)
            for obj in objects:
                single_obj = {}
                single_obj["area"] = obj["area"]
                single_obj["category_id"] = dota_classes.index(obj["name"]) + 1
                single_obj["segmentation"] = [obj["poly"]]
                single_obj["iscrowd"] = 0
                # Keep the raw oriented-box corners; the axis-aligned
                # (xmin, ymin, w, h) form is deliberately NOT used here.
                x1, y1, x2, y2, x3, y3, x4, y4 = obj["poly"][:8]
                # single_obj["bbox"] = xmin, ymin, width, height
                single_obj["bbox"] = x1, y1, x2, y2, x3, y3, x4, y4  # modify here
                single_obj["image_id"] = image_id
                single_obj["id"] = inst_count
                data_dict["annotations"].append(single_obj)
                inst_count = inst_count + 1
            image_id = image_id + 1
        json.dump(data_dict, f_out)


if __name__ == "__main__":
    # FIX: the original path lacked the leading "/" ("home/ashwin/...") and
    # therefore resolved relative to the current working directory.
    out_dir = Path("/home/ashwin/Desktop/Projects/dotadevkit/example_split")
    DOTA2COCO(out_dir, out_dir / "DOTA_val.json", version="1.0")
2 changes: 1 addition & 1 deletion tools/analysis_tools/model_calibration.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,4 +265,4 @@ def get_calibrator(val_file, calibration_file, model_detections, calibration_typ

# Measure Error
print("calibrated test set error:")
calibration_error(predicted_ious_test, dets, num_cl=num_classes)
calibration_error(predicted_ious_test, dets, num_cl=num_classes)
294 changes: 294 additions & 0 deletions tools/analysis_tools/model_calibration_rotate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,294 @@
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
import argparse

import numpy as np
from pycocotools.coco import COCO
import os
from mmdet.core.bbox.iou_calculators.iou2d_calculator import bbox_overlaps
import torch
import pickle
import random
from operator import itemgetter
import json
from mmcv.ops import box_iou_quadri

def set_all_seeds(seed):
    """Seed every RNG this script relies on (python, numpy, torch CPU/GPU)
    and force deterministic cuDNN kernels, for reproducible sampling."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True

# Seed all RNGs at import time so the random image subset drawn in
# create_calibration_dataset() is reproducible across runs.
set_all_seeds(0)

# coco-format annotations are in [x1, y1, w, h]

# def assign_post(ann_dict, det_bboxes, det_score, det_label, dataset_classes, min_iou=0.5, max_iou=0.7):
# num_classes = len(dataset_classes)
# ious = np.zeros([det_bboxes.shape[0]])
# ## Assign
# for k, v in ann_dict.items():
# # Convert to numpy and reshape
# gt_boxes = np.array(v).reshape(-1, 4)

# # Convert to TL, BR representation
# gt_boxes[:, 2] += gt_boxes[:, 0]
# gt_boxes[:, 3] += gt_boxes[:, 1]

# rel_idx = (det_label==k).nonzero()[0]

# ious_cl = (bbox_overlaps(torch.from_numpy(gt_boxes), torch.from_numpy(det_bboxes[rel_idx]))).numpy()

# ious[rel_idx] = np.max(ious_cl, axis=0)

# return ious


# Dota-format annotations are in [x1, y1, x2, y2, x3, y3, x4, y4]
# use box_iou_quadri to compute the iou for rotated boxes
def assign_rotated(ann_dict, det_bboxes, det_score, det_label, dataset_classes):
    """Assign each detection its best rotated-box IoU against same-class GT.

    Parameters
    ----------
    ann_dict : dict
        class index -> flat list of ground-truth quad coordinates
        (8 values per box, [x1, y1, ..., x4, y4]).
    det_bboxes : np.ndarray
        (N, 8) detected quadrilateral boxes.
    det_score : np.ndarray
        Detection scores (unused here; kept for signature compatibility
        with the axis-aligned ``assign_post`` variant above).
    det_label : np.ndarray
        (N,) class index per detection.
    dataset_classes : list
        Dataset category ids (unused; kept for signature compatibility).

    Returns
    -------
    np.ndarray
        (N,) max IoU per detection; stays 0 for detections of classes that
        have no ground truth in ``ann_dict``.
    """
    # FIX: removed the unused local ``num_classes`` the original computed.
    ious = np.zeros([det_bboxes.shape[0]])
    for k, v in ann_dict.items():
        # Ground-truth quads for class k, one row per box.
        gt_boxes = np.array(v).reshape(-1, 8)
        # Detections predicted as class k.
        rel_idx = (det_label == k).nonzero()[0]
        # Rotated-box IoU between every GT box and every class-k detection.
        ious_cl = (box_iou_quadri(torch.from_numpy(gt_boxes).float(), torch.from_numpy(det_bboxes[rel_idx]).float())).numpy()
        ious[rel_idx] = np.max(ious_cl, axis=0)
    return ious

def get_ann(cocoGt, ann_ids, dataset_classes):
    """Group ground-truth boxes by class index.

    Returns a dict mapping ``dataset_classes.index(category_id)`` to a flat
    list of bbox coordinates (boxes of that class concatenated back to back).
    """
    grouped = {}
    for ann in cocoGt.loadAnns(ann_ids):
        cls_idx = dataset_classes.index(ann['category_id'])
        grouped.setdefault(cls_idx, []).extend(ann['bbox'])
    return grouped


def create_calibration_dataset(cocoGt, model_detections, filename, dataset_classes, num_images=500):
    """Build the (score, iou, label) calibration array and cache it as .npy.

    For every (optionally sub-sampled) validation image, matches the model's
    detections against ground truth with rotated-box IoU and stacks one row
    per detection: [score, max_iou, class_index].  The array is saved to
    ``filename`` and returned.
    """
    all_detections = []

    # Optionally draw a fixed random subset of images (reproducible thanks
    # to the module-level seeding).
    if num_images > 0 and num_images < 2500:
        idx = np.random.choice(range(len(cocoGt.dataset['images'])), size=num_images, replace=False)
        print('sampled image indices:', idx)
        images = itemgetter(*idx)(cocoGt.dataset['images'])
    else:
        print('using all val set images')
        images = cocoGt.dataset['images']

    # FIX: the detections file was opened with a bare open() and never closed.
    with open(model_detections) as f:
        final_dets = json.load(f)
    print('detections are loaded')

    # FIX: index detections by image once (O(D)) instead of rescanning the
    # whole detection list for every image (was O(images * detections)).
    # Also dropped the dead ``counter`` local the original computed but
    # never used.
    dets_by_image = {}
    for det in final_dets:
        dets_by_image.setdefault(det['image_id'], []).append(det)

    for img in images:
        detections = dets_by_image.get(img['id'], [])

        det_score = np.array([det['score'] for det in detections])
        det_bboxes = np.array([det['bbox'] for det in detections])
        det_label = np.array([dataset_classes.index(det['category_id']) for det in detections])

        # No detections for this image -> nothing to calibrate on.
        if det_bboxes.ndim < 2:
            continue

        # Ground-truth boxes (crowd annotations excluded), grouped by class.
        ann_ids = cocoGt.getAnnIds(imgIds=img['id'], iscrowd=False)
        ann_dict = get_ann(cocoGt, ann_ids, dataset_classes)

        # Rotated-box IoU of each detection against same-class ground truth.
        ious = assign_rotated(ann_dict, det_bboxes, det_score, det_label, dataset_classes)

        detections = np.concatenate((np.expand_dims(det_score, axis=1), np.expand_dims(ious, axis=1), np.expand_dims(det_label, axis=1)), axis=1)
        all_detections.append(detections)

    dets = np.vstack(all_detections)
    np.save(filename, dets)
    return dets


def calibration_error(predicted_ious, det_ious, bin_count=25, num_cl=80):
bins = np.linspace(0., 1., bin_count + 1)
errors = np.zeros([num_cl, bin_count])
weights_per_bin = np.zeros([num_cl, bin_count])

total_cls_iou = np.zeros([num_cl])

for cl in range(num_cl):
rel_idx = (det_ious[:, 2]==cl).nonzero()[0]
predicted_ious_cls = predicted_ious[rel_idx]
det_ious_cls = det_ious[rel_idx, 1]

total_det = len(predicted_ious_cls)
total_cls_iou[cl] = total_det

for i in range(bin_count):
# Find detections in this bin
bin_idxs = np.logical_and(bins[i] <= predicted_ious_cls, predicted_ious_cls < bins[i + 1])
bin_pred_ious_cls = predicted_ious_cls[bin_idxs]
bin_det_ious_cls = det_ious_cls[bin_idxs]

num_det = len(bin_pred_ious_cls)

if num_det == 0:
errors[cl, i] = np.nan
weights_per_bin[cl, i] = 0
else:
# Average of Scores in this bin
mean_pred = bin_pred_ious_cls.mean()
mean_det = bin_det_ious_cls.mean()

errors[cl, i] = np.abs(mean_pred - mean_det)

# Weight of the bin
weights_per_bin[cl, i] = num_det / total_det

ECE_OD = np.nanmean(np.nansum(weights_per_bin * errors, axis=1))
ACE_OD = np.nanmean(np.nanmean(errors, axis=1))
MCE_OD = np.nanmean(np.nanmax(errors, axis=1))
print('ECE = ', ECE_OD)
print('ACE=', ACE_OD)
print('MCE=', MCE_OD)

def get_calibration_data(cocoGt, model_detections, filename_val, dataset_classes, num_images=-1):
    """Return the cached (score, iou, label) array, building it on a miss.

    If ``filename_val`` already exists it is loaded directly; otherwise the
    dataset is created (and cached) via ``create_calibration_dataset``.
    """
    if os.path.exists(filename_val):
        print('Reading dataset...')
        return np.load(filename_val)
    print('Creating dataset...')
    return create_calibration_dataset(cocoGt, model_detections, filename_val, dataset_classes, num_images)


def train_calibrator(coco, dets, dataset_classes, calibration_file, type, class_agnostic):
    """Fit score->IoU calibrators and pickle the resulting dict.

    Parameters
    ----------
    coco : COCO
        Unused; kept for signature compatibility with existing callers.
    dets : np.ndarray
        (N, 3) rows of [score, iou, class_index].
    dataset_classes : list
        Category ids; one calibrator entry per class index.
    calibration_file : str
        Destination pickle path.
    type : str
        'IR' (isotonic regression) or 'LR' (linear regression).  The name
        shadows the builtin but is kept for caller compatibility.
    class_agnostic : bool
        If True, fit a single calibrator on all classes and share it.

    Returns
    -------
    dict
        class index -> fitted sklearn regressor.
    """
    # FIX: an unknown ``type`` previously fell through silently -- a
    # NameError in the class-agnostic branch, or an *empty* pickled dict in
    # the class-wise branch.  Fail fast instead.
    if type not in ('IR', 'LR'):
        raise ValueError("unknown calibration type: %r (expected 'IR' or 'LR')" % (type,))

    calibrator = dict()
    if class_agnostic:
        det_scores = dets[:, 0].reshape(-1, 1)
        det_ious = dets[:, 1].reshape(-1)
        if type == 'IR':
            shared_calibrator = IsotonicRegression(y_min=0., y_max=1., out_of_bounds='clip').fit(det_scores, det_ious)
        else:
            shared_calibrator = LinearRegression().fit(det_scores, det_ious)

        # Every class index maps to the same shared calibrator object.
        for cls in range(len(dataset_classes)):
            calibrator[cls] = shared_calibrator
    else:
        for cls in range(len(dataset_classes)):
            idx = (dets[:, 2] == cls).nonzero()[0]
            det_scores = dets[idx, 0].reshape(-1, 1)
            det_ious = dets[idx, 1].reshape(-1)

            # NOTE(review): a class with zero detections makes sklearn's
            # fit() raise here -- confirm every class appears in ``dets``.
            if type == 'IR':
                calibrator[cls] = IsotonicRegression(y_min=0., y_max=1., out_of_bounds='clip').fit(det_scores, det_ious)
            else:
                calibrator[cls] = LinearRegression().fit(det_scores, det_ious)

    with open(calibration_file, 'wb') as f:
        pickle.dump(calibrator, f)

    return calibrator


def predict_prob(calibrator, dets, dataset_classes):
    """Apply the per-class calibrators to detection scores.

    For each class index, runs that class's calibrator over the raw scores
    (column 0 of ``dets``) and clips the output to [0, 1].  Rows of classes
    with no detections keep their 0 initialization.
    """
    calibrated = np.zeros(dets.shape[0])
    for cls_idx in range(len(dataset_classes)):
        rows = np.flatnonzero(dets[:, 2] == cls_idx)
        if rows.size == 0:
            continue
        raw = calibrator[cls_idx].predict(dets[rows, 0].reshape(-1, 1))
        calibrated[rows] = np.clip(raw, 0, 1)
    return calibrated


def get_calibrator(val_file, calibration_file, model_detections, calibration_type,
                   class_agnostic=False, num_images=-1, filename_val=None):
    """Build/load the validation calibration data and fit a calibrator.

    Parameters mirror ``train_calibrator`` / ``get_calibration_data``;
    ``filename_val`` is the .npy cache path for the validation data.

    FIX: ``filename_val`` used to be read as a module-level global that is
    only defined under ``__main__``, so importing this function from
    another module raised NameError.  It is now an explicit optional
    parameter; when omitted we fall back to the old module global so the
    script entry point keeps working unchanged.
    """
    if filename_val is None:
        filename_val = globals().get('filename_val')

    # Get Validation Dataset
    cocoGt = COCO(val_file)
    dataset_classes = list(cocoGt.cats.keys())
    dets = get_calibration_data(cocoGt, model_detections, filename_val, dataset_classes, num_images)

    # Learn Calibration Model
    print('Fitting calibrator...')
    calibrator = train_calibrator(cocoGt, dets, dataset_classes, calibration_file, calibration_type,
                                  class_agnostic)

    return calibrator

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('model_name', help='Model Name to Calibrate)')
    args = parser.parse_args()

    model_name = args.model_name

    # Calibration setup: isotonic regression ('IR'), a single shared
    # class-agnostic calibrator, fitted on 458 randomly sampled images.
    calibration_type = 'IR'
    class_agnostic_calibration = True
    num_images = 458

    # val_file = 'calibration/data/calibration_val2017.json'
    # test_file = 'calibration/data/calibration_test2017.json'
    # NOTE(review): val_file and test_file point at the same annotation
    # file (and the same detections are used below), so the "test set"
    # numbers are computed on the fitting split itself -- confirm intended.
    val_file = '/data/dota/val/DOTA_1.0.json'
    test_file = '/data/dota/val/DOTA_1.0.json'
    # model_detections = "calibration/" + model_name + "/final_detections/val.bbox.json"
    # model_detections_test = "calibration/" + model_name + "/final_detections/val.bbox.json"
    model_detections = "calibration/" + model_name + "/obb_final_detections/val.bbox.json"
    model_detections_test = "calibration/" + model_name + "/obb_final_detections/val.bbox.json"

    # Cached (score, iou, label) arrays.  Note: get_calibrator() reads
    # ``filename_val`` as a module-level global rather than a parameter.
    filename_val = "calibration/" + model_name + "/obb_final_detections/" + 'all_val.npy'
    filename_test = "calibration/" + model_name + "/obb_final_detections/" + 'all_test.npy'

    if class_agnostic_calibration:
        calibration_file = "mocae_rotated_object_detection/" + model_name + "/calibrators/" + calibration_type + '_class_agnostic_finaldets458.pkl'
    else:
        calibration_file = "mocae_rotated_object_detection/" + model_name + "/calibrators/" + calibration_type + '_class_wise_finaldets458.pkl'

    # Fit the calibrator on the validation split (building the cached
    # calibration array if it does not exist yet).
    calibrator = get_calibrator(val_file, calibration_file, model_detections, calibration_type, class_agnostic=class_agnostic_calibration,
                                num_images=num_images)

    # Get Test Dataset
    cocoGt = COCO(test_file)
    dataset_classes = list(cocoGt.cats.keys())
    num_classes = len(dataset_classes)

    dets = get_calibration_data(cocoGt, model_detections_test, filename_test, dataset_classes)
    # Uncalibrated Test Error
    print("uncalibrated test set error:")
    # Measure Error
    calibration_error(dets[:, 0], dets, num_cl=num_classes)

    # Get calibrated probabilities on test set
    predicted_ious_test = predict_prob(calibrator, dets, dataset_classes)

    # Measure Error
    print("calibrated test set error:")
    calibration_error(predicted_ious_test, dets, num_cl=num_classes)
Loading