ultralytics/models/yolo/detect/val.py/DetectionValidator/init_metrics
class DetectionValidator:
def init_metrics(self, model):
"""Initialize evaluation metrics for YOLO."""
val = self.data.get(self.args.split, "") # validation path
self.is_coco = (
isinstance(val, str)
and "coco" in val
and (val.endswith(f"{os.sep}val2017.txt") or val.endswith(f"{os.sep}test-dev2017.txt"))
) # is COCO
self.is_lvis = isinstance(val, str) and "lvis" in val and not self.is_coco # is LVIS
self.class_map = converter.coco80_to_coco91_class() if self.is_coco else list(range(len(model.names)))
self.args.save_json |= self.args.val and (self.is_coco or self.is_lvis) and not self.training # run final val
self.names = model.names
self.nc = len(model.names)
self.metrics.names = self.names
self.metrics.plot = self.args.plots
self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf)
self.seen = 0
self.jdict = []
self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
ultralytics/models/yolo/detect/val.py/DetectionValidator/get_desc
class DetectionValidator:
def get_desc(self):
"""Return a formatted string summarizing class metrics of YOLO model."""
return ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)")
ultralytics/models/yolo/detect/val.py/DetectionValidator/postprocess
class DetectionValidator:
def postprocess(self, preds):
"""Apply Non-maximum suppression to prediction outputs."""
return ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
labels=self.lb,
multi_label=True,
agnostic=self.args.single_cls or self.args.agnostic_nms,
max_det=self.args.max_det,
)
ultralytics/models/yolo/detect/val.py/DetectionValidator/_prepare_batch
class DetectionValidator:
def _prepare_batch(self, si, batch):
"""Prepares a batch of images and annotations for validation."""
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
ori_shape = batch["ori_shape"][si]
imgsz = batch["img"].shape[2:]
ratio_pad = batch["ratio_pad"][si]
if len(cls):
bbox = ops.xywh2xyxy(bbox) * torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]] # target boxes
ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad) # native-space labels
return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
ultralytics/models/yolo/detect/val.py/DetectionValidator/_prepare_pred
class DetectionValidator:
def _prepare_pred(self, pred, pbatch):
"""Prepares a batch of images and annotations for validation."""
predn = pred.clone()
ops.scale_boxes(
pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"]
) # native-space pred
return predn
ultralytics/models/yolo/detect/val.py/DetectionValidator/update_metrics
class DetectionValidator:
def update_metrics(self, preds, batch):
"""Metrics."""
for si, pred in enumerate(preds):
self.seen += 1
npr = len(pred)
stat = dict(
conf=torch.zeros(0, device=self.device),
pred_cls=torch.zeros(0, device=self.device),
tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
)
pbatch = self._prepare_batch(si, batch)
cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
nl = len(cls)
stat["target_cls"] = cls
stat["target_img"] = cls.unique()
if npr == 0:
if nl:
for k in self.stats.keys():
self.stats[k].append(stat[k])
if self.args.plots:
self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
continue
# Predictions
if self.args.single_cls:
pred[:, 5] = 0
predn = self._prepare_pred(pred, pbatch)
stat["conf"] = predn[:, 4]
stat["pred_cls"] = predn[:, 5]
# Evaluate
if nl:
stat["tp"] = self._process_batch(predn, bbox, cls)
if self.args.plots:
self.confusion_matrix.process_batch(predn, bbox, cls)
for k in self.stats.keys():
self.stats[k].append(stat[k])
# Save
if self.args.save_json:
self.pred_to_json(predn, batch["im_file"][si])
if self.args.save_txt:
self.save_one_txt(
predn,
self.args.save_conf,
pbatch["ori_shape"],
self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
)
ultralytics/models/yolo/detect/val.py/DetectionValidator/finalize_metrics
class DetectionValidator:
def finalize_metrics(self, *args, **kwargs):
"""Set final values for metrics speed and confusion matrix."""
self.metrics.speed = self.speed
self.metrics.confusion_matrix = self.confusion_matrix
ultralytics/models/yolo/detect/val.py/DetectionValidator/get_stats
class DetectionValidator:
def get_stats(self):
"""Returns metrics statistics and results dictionary."""
stats = {k: torch.cat(v, 0).cpu().numpy() for k, v in self.stats.items()} # to numpy
self.nt_per_class = np.bincount(stats["target_cls"].astype(int), minlength=self.nc)
self.nt_per_image = np.bincount(stats["target_img"].astype(int), minlength=self.nc)
stats.pop("target_img", None)
if len(stats) and stats["tp"].any():
self.metrics.process(**stats)
return self.metrics.results_dict
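A small sketch of the np.bincount call above (hypothetical labels): minlength pads the counts so classes with no labels still get a zero entry.

import numpy as np

target_cls = np.array([0.0, 2.0, 2.0, 5.0])  # ground-truth class ids from the stats dict
nt_per_class = np.bincount(target_cls.astype(int), minlength=8)
print(nt_per_class)  # [1 0 2 0 0 1 0 0]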
ultralytics/models/yolo/detect/val.py/DetectionValidator/print_results
class DetectionValidator:
def print_results(self):
"""Prints training/validation set metrics per class."""
pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys) # print format
LOGGER.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
if self.nt_per_class.sum() == 0:
LOGGER.warning(f"WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels")
# Print results per class
if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
for i, c in enumerate(self.metrics.ap_class_index):
LOGGER.info(
pf % (self.names[c], self.nt_per_image[c], self.nt_per_class[c], *self.metrics.class_result(i))
)
if self.args.plots:
for normalize in True, False:
self.confusion_matrix.plot(
save_dir=self.save_dir, names=self.names.values(), normalize=normalize, on_plot=self.on_plot
)
ultralytics/models/yolo/detect/val.py/DetectionValidator/_process_batch
class DetectionValidator:
def _process_batch(self, detections, gt_bboxes, gt_cls):
"""
Return correct prediction matrix.
Args:
detections (torch.Tensor): Tensor of shape (N, 6) representing detections where each detection is
(x1, y1, x2, y2, conf, class).
gt_bboxes (torch.Tensor): Tensor of shape (M, 4) representing ground-truth bounding box coordinates. Each
bounding box is of the format: (x1, y1, x2, y2).
gt_cls (torch.Tensor): Tensor of shape (M,) representing target class indices.
Returns:
(torch.Tensor): Correct prediction matrix of shape (N, 10) for 10 IoU levels.
Note:
    The returned matrix is an intermediate representation used to evaluate predictions against ground
    truth; final metric values are computed from it downstream.
"""
iou = box_iou(gt_bboxes, detections[:, :4])
return self.match_predictions(detections[:, 5], gt_cls, iou)
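A minimal re-implementation sketch of the pairwise IoU that box_iou computes here (xyxy boxes; an illustration, not the library function itself):

import torch

def pairwise_iou(a, b):  # a: (M, 4), b: (N, 4) in xyxy; returns an (M, N) IoU matrix
    lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)  # zero out non-overlapping pairs
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)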
ultralytics/models/yolo/detect/val.py/DetectionValidator/build_dataset
class DetectionValidator:
def build_dataset(self, img_path, mode="val", batch=None):
"""
Build YOLO Dataset.
Args:
img_path (str): Path to the folder containing images.
mode (str): `train` mode or `val` mode; users can customize different augmentations for each mode.
batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
"""
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, stride=self.stride)
ultralytics/models/yolo/detect/val.py/DetectionValidator/get_dataloader
class DetectionValidator:
def get_dataloader(self, dataset_path, batch_size):
"""Construct and return dataloader."""
dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1)
ultralytics/models/yolo/detect/val.py/DetectionValidator/plot_val_samples
class DetectionValidator:
def plot_val_samples(self, batch, ni):
"""Plot validation image samples."""
plot_images(
batch["img"],
batch["batch_idx"],
batch["cls"].squeeze(-1),
batch["bboxes"],
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_labels.jpg",
names=self.names,
on_plot=self.on_plot,
)
ultralytics/models/yolo/detect/val.py/DetectionValidator/plot_predictions
class DetectionValidator:
def plot_predictions(self, batch, preds, ni):
"""Plots predicted bounding boxes on input images and saves the result."""
plot_images(
batch["img"],
*output_to_target(preds, max_det=self.args.max_det),
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
)
ultralytics/models/yolo/detect/val.py/DetectionValidator/save_one_txt
class DetectionValidator:
def save_one_txt(self, predn, save_conf, shape, file):
"""Save YOLO detections to a txt file in normalized coordinates in a specific format."""
from ultralytics.engine.results import Results
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
boxes=predn[:, :6],
).save_txt(file, save_conf=save_conf)
ultralytics/models/yolo/detect/val.py/DetectionValidator/pred_to_json
class DetectionValidator:
def pred_to_json(self, predn, filename):
"""Serialize YOLO predictions to COCO json format."""
stem = Path(filename).stem
image_id = int(stem) if stem.isnumeric() else stem
box = ops.xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
self.jdict.append(
{
"image_id": image_id,
"category_id": self.class_map[int(p[5])]
+ (1 if self.is_lvis else 0), # index starts from 1 if it's lvis
"bbox": [round(x, 3) for x in b],
"score": round(p[4], 5),
}
)
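A numeric sketch of the box conversion above (hypothetical values): COCO json expects top-left xywh, so the xyxy prediction is first converted to center xywh, then shifted.

import torch

xyxy = torch.tensor([[100.0, 50.0, 300.0, 250.0]])
box = torch.cat([(xyxy[:, :2] + xyxy[:, 2:]) / 2, xyxy[:, 2:] - xyxy[:, :2]], dim=1)  # what ops.xyxy2xywh computes
box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
print(box)  # tensor([[100.,  50., 200., 200.]])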
ultralytics/models/yolo/detect/val.py/DetectionValidator/eval_json
class DetectionValidator:
def eval_json(self, stats):
"""Evaluates YOLO output in JSON format and returns performance statistics."""
if self.args.save_json and (self.is_coco or self.is_lvis) and len(self.jdict):
pred_json = self.save_dir / "predictions.json" # predictions
anno_json = (
self.data["path"]
/ "annotations"
/ ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
) # annotations
pkg = "pycocotools" if self.is_coco else "lvis"
LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
for x in pred_json, anno_json:
assert x.is_file(), f"{x} file not found"
check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
if self.is_coco:
from pycocotools.coco import COCO # noqa
from pycocotools.cocoeval import COCOeval # noqa
anno = COCO(str(anno_json)) # init annotations api
pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path)
val = COCOeval(anno, pred, "bbox")
else:
from lvis import LVIS, LVISEval
anno = LVIS(str(anno_json)) # init annotations api
pred = anno._load_json(str(pred_json)) # init predictions api (must pass string, not Path)
val = LVISEval(anno, pred, "bbox")
val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval
val.evaluate()
val.accumulate()
val.summarize()
if self.is_lvis:
val.print_results() # explicitly call print_results
# update mAP50-95 and mAP50
stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
val.stats[:2] if self.is_coco else [val.results["AP50"], val.results["AP"]]
)
except Exception as e:
LOGGER.warning(f"{pkg} unable to run: {e}")
return stats
ultralytics/models/yolo/detect/train.py/DetectionTrainer/build_dataset
class DetectionTrainer:
def build_dataset(self, img_path, mode="train", batch=None):
"""
Build YOLO Dataset.
Args:
img_path (str): Path to the folder containing images.
mode (str): `train` mode or `val` mode; users can customize different augmentations for each mode.
batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
"""
gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/get_dataloader
class DetectionTrainer:
def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
"""Construct and return dataloader."""
assert mode in {"train", "val"}, f"Mode must be 'train' or 'val', not {mode}."
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = self.build_dataset(dataset_path, mode, batch_size)
shuffle = mode == "train"
if getattr(dataset, "rect", False) and shuffle:
LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
shuffle = False
workers = self.args.workers if mode == "train" else self.args.workers * 2
return build_dataloader(dataset, batch_size, workers, shuffle, rank)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/preprocess_batch
class DetectionTrainer:
def preprocess_batch(self, batch):
"""Preprocesses a batch of images by scaling and converting to float."""
batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
if self.args.multi_scale:
imgs = batch["img"]
sz = (
random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1.5 + self.stride))
// self.stride
* self.stride
) # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [
math.ceil(x * sf / self.stride) * self.stride for x in imgs.shape[2:]
] # new shape (stretched to gs-multiple)
imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
batch["img"] = imgs
return batch
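A minimal sketch of the multi-scale size selection above (hypothetical imgsz and stride): the random size is snapped down to a stride multiple so the network's downsampling still divides evenly.

import random

imgsz, stride = 640, 32
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + stride)) // stride * stride
assert sz % stride == 0 and 320 <= sz <= 960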
ultralytics/models/yolo/detect/train.py/DetectionTrainer/set_model_attributes
class DetectionTrainer:
def set_model_attributes(self):
"""Nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps)."""
# self.args.box *= 3 / nl # scale to layers
# self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers
# self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
self.model.nc = self.data["nc"] # attach number of classes to model
self.model.names = self.data["names"] # attach class names to model
self.model.args = self.args
ultralytics/models/yolo/detect/train.py/DetectionTrainer/get_model
class DetectionTrainer:
def get_model(self, cfg=None, weights=None, verbose=True):
"""Return a YOLO detection model."""
model = DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
ultralytics/models/yolo/detect/train.py/DetectionTrainer/get_validator
class DetectionTrainer:
def get_validator(self):
"""Returns a DetectionValidator for YOLO model validation."""
self.loss_names = "box_loss", "cls_loss", "dfl_loss"
return yolo.detect.DetectionValidator(
self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/label_loss_items
class DetectionTrainer:
def label_loss_items(self, loss_items=None, prefix="train"):
"""
Returns a loss dict with labelled training loss items tensor.
Not needed for classification but necessary for segmentation & detection
"""
keys = [f"{prefix}/{x}" for x in self.loss_names]
if loss_items is not None:
loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats
return dict(zip(keys, loss_items))
else:
return keys
ultralytics/models/yolo/detect/train.py/DetectionTrainer/progress_string
class DetectionTrainer:
def progress_string(self):
"""Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size."""
return ("\n" + "%11s" * (4 + len(self.loss_names))) % (
"Epoch",
"GPU_mem",
*self.loss_names,
"Instances",
"Size",
)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/plot_training_samples
class DetectionTrainer:
def plot_training_samples(self, batch, ni):
"""Plots training samples with their annotations."""
plot_images(
images=batch["img"],
batch_idx=batch["batch_idx"],
cls=batch["cls"].squeeze(-1),
bboxes=batch["bboxes"],
paths=batch["im_file"],
fname=self.save_dir / f"train_batch{ni}.jpg",
on_plot=self.on_plot,
)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/plot_metrics
class DetectionTrainer:
def plot_metrics(self):
"""Plots metrics from a CSV file."""
plot_results(file=self.csv, on_plot=self.on_plot)
ultralytics/models/yolo/detect/train.py/DetectionTrainer/plot_training_labels
class DetectionTrainer:
def plot_training_labels(self):
"""Create a labeled training plot of the YOLO model."""
boxes = np.concatenate([lb["bboxes"] for lb in self.train_loader.dataset.labels], 0)
cls = np.concatenate([lb["cls"] for lb in self.train_loader.dataset.labels], 0)
plot_labels(boxes, cls.squeeze(), names=self.data["names"], save_dir=self.save_dir, on_plot=self.on_plot)
ultralytics/models/yolo/detect/predict.py/DetectionPredictor/postprocess
class DetectionPredictor:
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects."""
preds = ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
agnostic=self.args.agnostic_nms,
max_det=self.args.max_det,
classes=self.args.classes,
)
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
results = []
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
return results
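For context, a typical end-to-end call that exercises this postprocess step (standard Ultralytics usage; the weights and image file below are placeholders):

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # placeholder weights
results = model.predict("bus.jpg", conf=0.25, iou=0.7)  # inference runs NMS via this postprocess
for r in results:
    print(r.boxes.xyxy.shape)  # (num_detections, 4), boxes scaled back to original-image coordinates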
ultralytics/models/yolo/obb/val.py/OBBValidator/__init__
class OBBValidator:
def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
"""Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics."""
super().__init__(dataloader, save_dir, pbar, args, _callbacks)
self.args.task = "obb"
self.metrics = OBBMetrics(save_dir=self.save_dir, plot=True, on_plot=self.on_plot)
ultralytics/models/yolo/obb/val.py/OBBValidator/init_metrics
class OBBValidator:
def init_metrics(self, model):
"""Initialize evaluation metrics for YOLO."""
super().init_metrics(model)
val = self.data.get(self.args.split, "") # validation path
self.is_dota = isinstance(val, str) and "DOTA" in val
ultralytics/models/yolo/obb/val.py/OBBValidator/postprocess
class OBBValidator:
def postprocess(self, preds):
"""Apply Non-maximum suppression to prediction outputs."""
return ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
labels=self.lb,
nc=self.nc,
multi_label=True,
agnostic=self.args.single_cls or self.args.agnostic_nms,
max_det=self.args.max_det,
rotated=True,
)
ultralytics/models/yolo/obb/val.py/OBBValidator/_process_batch
class OBBValidator:
def _process_batch(self, detections, gt_bboxes, gt_cls):
"""
Perform computation of the correct prediction matrix for a batch of detections and ground truth bounding boxes.
Args:
detections (torch.Tensor): A tensor of shape (N, 7) representing the detected bounding boxes and associated
data. Each detection is represented as (x1, y1, x2, y2, conf, class, angle).
gt_bboxes (torch.Tensor): A tensor of shape (M, 5) representing the ground truth bounding boxes. Each box is
represented as (x1, y1, x2, y2, angle).
gt_cls (torch.Tensor): A tensor of shape (M,) representing class labels for the ground truth bounding boxes.
Returns:
(torch.Tensor): The correct prediction matrix with shape (N, 10), which includes 10 IoU (Intersection over
Union) levels for each detection, indicating the accuracy of predictions compared to the ground truth.
Example:
```python
detections = torch.rand(100, 7) # 100 sample detections
gt_bboxes = torch.rand(50, 5) # 50 sample ground truth boxes
gt_cls = torch.randint(0, 5, (50,)) # 50 ground truth class labels
correct_matrix = OBBValidator._process_batch(detections, gt_bboxes, gt_cls)
```
Note:
This method relies on `batch_probiou` to calculate IoU between detections and ground truth bounding boxes.
"""
iou = batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
return self.match_predictions(detections[:, 5], gt_cls, iou)
ultralytics/models/yolo/obb/val.py/OBBValidator/_prepare_batch
class OBBValidator:
def _prepare_batch(self, si, batch):
"""Prepares and returns a batch for OBB validation."""
idx = batch["batch_idx"] == si
cls = batch["cls"][idx].squeeze(-1)
bbox = batch["bboxes"][idx]
ori_shape = batch["ori_shape"][si]
imgsz = batch["img"].shape[2:]
ratio_pad = batch["ratio_pad"][si]
if len(cls):
bbox[..., :4].mul_(torch.tensor(imgsz, device=self.device)[[1, 0, 1, 0]]) # target boxes
ops.scale_boxes(imgsz, bbox, ori_shape, ratio_pad=ratio_pad, xywh=True) # native-space labels
return {"cls": cls, "bbox": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}
ultralytics/models/yolo/obb/val.py/OBBValidator/_prepare_pred
class OBBValidator:
def _prepare_pred(self, pred, pbatch):
"""Prepares and returns a batch for OBB validation with scaled and padded bounding boxes."""
predn = pred.clone()
ops.scale_boxes(
pbatch["imgsz"], predn[:, :4], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"], xywh=True
) # native-space pred
return predn
ultralytics/models/yolo/obb/val.py/OBBValidator/plot_predictions
class OBBValidator:
def plot_predictions(self, batch, preds, ni):
"""Plots predicted bounding boxes on input images and saves the result."""
plot_images(
batch["img"],
*output_to_rotated_target(preds, max_det=self.args.max_det),
paths=batch["im_file"],
fname=self.save_dir / f"val_batch{ni}_pred.jpg",
names=self.names,
on_plot=self.on_plot,
)
ultralytics/models/yolo/obb/val.py/OBBValidator/pred_to_json
class OBBValidator:
def pred_to_json(self, predn, filename):
"""Serialize YOLO predictions to COCO json format."""
stem = Path(filename).stem
image_id = int(stem) if stem.isnumeric() else stem
rbox = torch.cat([predn[:, :4], predn[:, -1:]], dim=-1)
poly = ops.xywhr2xyxyxyxy(rbox).view(-1, 8)
for i, (r, b) in enumerate(zip(rbox.tolist(), poly.tolist())):
self.jdict.append(
{
"image_id": image_id,
"category_id": self.class_map[int(predn[i, 5].item())],
"score": round(predn[i, 4].item(), 5),
"rbox": [round(x, 3) for x in r],
"poly": [round(x, 3) for x in b],
}
)
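A minimal sketch of the rotation math behind ops.xywhr2xyxyxyxy (center, width, height, angle in radians to four polygon corners; an illustration, not the library code):

import torch

def xywhr_to_corners(x):  # x: (N, 5) as (cx, cy, w, h, r); returns an (N, 8) polygon
    ctr, w, h, r = x[:, :2], x[:, 2:3], x[:, 3:4], x[:, 4:5]
    cos, sin = torch.cos(r), torch.sin(r)
    v1 = torch.cat([w / 2 * cos, w / 2 * sin], dim=-1)  # half-extent along the rotated width axis
    v2 = torch.cat([-h / 2 * sin, h / 2 * cos], dim=-1)  # half-extent along the rotated height axis
    return torch.cat([ctr + v1 + v2, ctr + v1 - v2, ctr - v1 - v2, ctr - v1 + v2], dim=-1)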
ultralytics/models/yolo/obb/val.py/OBBValidator/save_one_txt
class OBBValidator:
def save_one_txt(self, predn, save_conf, shape, file):
"""Save YOLO detections to a txt file in normalized coordinates in a specific format."""
import numpy as np
from ultralytics.engine.results import Results
rboxes = torch.cat([predn[:, :4], predn[:, -1:]], dim=-1)
# xywh, r, conf, cls
obb = torch.cat([rboxes, predn[:, 4:6]], dim=-1)
Results(
np.zeros((shape[0], shape[1]), dtype=np.uint8),
path=None,
names=self.names,
obb=obb,
).save_txt(file, save_conf=save_conf)
ultralytics/models/yolo/obb/val.py/OBBValidator/eval_json
class OBBValidator:
def eval_json(self, stats):
"""Evaluates YOLO output in JSON format and returns performance statistics."""
if self.args.save_json and self.is_dota and len(self.jdict):
import json
import re
from collections import defaultdict
pred_json = self.save_dir / "predictions.json" # predictions
pred_txt = self.save_dir / "predictions_txt" # predictions
pred_txt.mkdir(parents=True, exist_ok=True)
with open(pred_json) as f:
    data = json.load(f)
# Save split results
LOGGER.info(f"Saving predictions with DOTA format to {pred_txt}...")
for d in data:
image_id = d["image_id"]
score = d["score"]
classname = self.names[d["category_id"]].replace(" ", "-")
p = d["poly"]
with open(f'{pred_txt / f"Task1_{classname}"}.txt', "a") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
# Save merged results. This could give a slightly lower mAP than the official merging script
# because of the probiou calculation.
pred_merged_txt = self.save_dir / "predictions_merged_txt" # predictions
pred_merged_txt.mkdir(parents=True, exist_ok=True)
merged_results = defaultdict(list)
LOGGER.info(f"Saving merged predictions with DOTA format to {pred_merged_txt}...")
for d in data:
image_id = d["image_id"].split("__")[0]
pattern = re.compile(r"\d+___\d+")
x, y = (int(c) for c in re.findall(pattern, d["image_id"])[0].split("___"))
bbox, score, cls = d["rbox"], d["score"], d["category_id"]
bbox[0] += x
bbox[1] += y
bbox.extend([score, cls])
merged_results[image_id].append(bbox)
for image_id, bbox in merged_results.items():
bbox = torch.tensor(bbox)
max_wh = torch.max(bbox[:, :2]).item() * 2
c = bbox[:, 6:7] * max_wh # classes
scores = bbox[:, 5] # scores
b = bbox[:, :5].clone()
b[:, :2] += c
# An NMS threshold of 0.3 gives results close to the official merging script, sometimes slightly better.
i = ops.nms_rotated(b, scores, 0.3)
bbox = bbox[i]
b = ops.xywhr2xyxyxyxy(bbox[:, :5]).view(-1, 8)
for x in torch.cat([b, bbox[:, 5:7]], dim=-1).tolist():
classname = self.names[int(x[-1])].replace(" ", "-")
p = [round(i, 3) for i in x[:-2]] # poly
score = round(x[-2], 3)
with open(f'{pred_merged_txt / f"Task1_{classname}"}.txt', "a") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
return stats
ultralytics/models/yolo/obb/train.py/OBBTrainer/__init__
class OBBTrainer:
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
"""Initialize a OBBTrainer object with given arguments."""
if overrides is None:
overrides = {}
overrides["task"] = "obb"
super().__init__(cfg, overrides, _callbacks)
ultralytics/models/yolo/obb/train.py/OBBTrainer/get_model
class OBBTrainer:
def get_model(self, cfg=None, weights=None, verbose=True):
"""Return OBBModel initialized with specified config and weights."""
model = OBBModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
ultralytics/models/yolo/obb/train.py/OBBTrainer/get_validator
class OBBTrainer:
def get_validator(self):
"""Return an instance of OBBValidator for validation of YOLO model."""
self.loss_names = "box_loss", "cls_loss", "dfl_loss"
return yolo.obb.OBBValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
ultralytics/models/yolo/obb/predict.py/OBBPredictor/__init__
class OBBPredictor:
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
"""Initializes OBBPredictor with optional model and data configuration overrides."""
super().__init__(cfg, overrides, _callbacks)
self.args.task = "obb"
ultralytics/models/yolo/obb/predict.py/OBBPredictor/postprocess
class OBBPredictor:
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects."""
preds = ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
agnostic=self.args.agnostic_nms,
max_det=self.args.max_det,
nc=len(self.model.names),
classes=self.args.classes,
rotated=True,
)
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
results = []
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
rboxes = ops.regularize_rboxes(torch.cat([pred[:, :4], pred[:, -1:]], dim=-1))
rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
# xywh, r, conf, cls
obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
results.append(Results(orig_img, path=img_path, names=self.model.names, obb=obb))
return results
ultralytics/models/nas/val.py/NASValidator/postprocess
class NASValidator:
def postprocess(self, preds_in):
"""Apply Non-maximum suppression to prediction outputs."""
boxes = ops.xyxy2xywh(preds_in[0][0])
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
return ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
labels=self.lb,
multi_label=False,
agnostic=self.args.single_cls or self.args.agnostic_nms,
max_det=self.args.max_det,
max_time_img=0.5,
)
ultralytics/models/nas/model.py/NAS/__init__
class NAS:
def __init__(self, model="yolo_nas_s.pt") -> None:
"""Initializes the NAS model with the provided or default 'yolo_nas_s.pt' model."""
assert Path(model).suffix not in {".yaml", ".yml"}, "YOLO-NAS models only support pre-trained models."
super().__init__(model, task="detect")
ultralytics/models/nas/model.py/NAS/_load
class NAS:
def _load(self, weights: str, task=None) -> None:
"""Loads an existing NAS model weights or creates a new NAS model with pretrained weights if not provided."""
import super_gradients
suffix = Path(weights).suffix
if suffix == ".pt":
self.model = torch.load(attempt_download_asset(weights))
elif suffix == "":
self.model = super_gradients.training.models.get(weights, pretrained_weights="coco")
# Override the forward method to ignore additional arguments
def new_forward(x, *args, **kwargs):
"""Ignore additional __call__ arguments."""
return self.model._original_forward(x)
self.model._original_forward = self.model.forward
self.model.forward = new_forward
# Standardize model
self.model.fuse = lambda verbose=True: self.model
self.model.stride = torch.tensor([32])
self.model.names = dict(enumerate(self.model._class_names))
self.model.is_fused = lambda: False # for info()
self.model.yaml = {} # for info()
self.model.pt_path = weights # for export()
self.model.task = "detect"
ultralytics/models/nas/model.py/NAS/_load/new_forward
class NAS:
def new_forward(x, *args, **kwargs):
"""Ignore additional __call__ arguments."""
return self.model._original_forward(x)
ultralytics/models/nas/model.py/NAS/info
class NAS:
def info(self, detailed=False, verbose=True):
"""
Logs model info.
Args:
detailed (bool): Show detailed information about model.
verbose (bool): Controls verbosity.
"""
return model_info(self.model, detailed=detailed, verbose=verbose, imgsz=640)
ultralytics/models/nas/model.py/NAS/task_map
class NAS:
def task_map(self):
"""Returns a dictionary mapping tasks to respective predictor and validator classes."""
return {"detect": {"predictor": NASPredictor, "validator": NASValidator}}
ultralytics/models/nas/predict.py/NASPredictor/postprocess
class NASPredictor:
def postprocess(self, preds_in, img, orig_imgs):
"""Postprocess predictions and returns a list of Results objects."""
# Cat boxes and class scores
boxes = ops.xyxy2xywh(preds_in[0][0])
preds = torch.cat((boxes, preds_in[0][1]), -1).permute(0, 2, 1)
preds = ops.non_max_suppression(
preds,
self.args.conf,
self.args.iou,
agnostic=self.args.agnostic_nms,
max_det=self.args.max_det,
classes=self.args.classes,
)
if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
results = []
for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
return results
ultralytics/models/fastsam/val.py/FastSAMValidator/__init__
class FastSAMValidator:
def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
"""
Initialize the FastSAMValidator class, setting the task to 'segment' and metrics to SegmentMetrics.
Args:
dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation.
save_dir (Path, optional): Directory to save results.
pbar (tqdm.tqdm): Progress bar for displaying progress.
args (SimpleNamespace): Configuration for the validator.
_callbacks (dict): Dictionary to store various callback functions.
Notes:
Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors.
"""
super().__init__(dataloader, save_dir, pbar, args, _callbacks)
self.args.task = "segment"
self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors
self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot)
ultralytics/models/fastsam/model.py/FastSAM/__init__
class FastSAM:
def __init__(self, model="FastSAM-x.pt"):
"""Call the __init__ method of the parent class (YOLO) with the updated default model."""
if str(model) == "FastSAM.pt":
model = "FastSAM-x.pt"
assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM models only support pre-trained models."
super().__init__(model=model, task="segment")
ultralytics/models/fastsam/model.py/FastSAM/predict
class FastSAM:
def predict(self, source, stream=False, bboxes=None, points=None, labels=None, texts=None, **kwargs):
"""
Perform segmentation prediction on image or video source.
Supports prompted segmentation with bounding boxes, points, labels, and texts.
Args:
source (str | PIL.Image | numpy.ndarray): Input source.
stream (bool): Enable real-time streaming.
bboxes (list): Bounding box coordinates for prompted segmentation.
points (list): Points for prompted segmentation.
labels (list): Labels for prompted segmentation.
texts (list): Texts for prompted segmentation.
**kwargs (Any): Additional keyword arguments.
Returns:
(list): Model predictions.
"""
prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
return super().predict(source, stream, prompts=prompts, **kwargs)
ultralytics/models/fastsam/model.py/FastSAM/task_map
class FastSAM:
def task_map(self):
"""Returns a dictionary mapping segment task to corresponding predictor and validator classes."""
return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}}
ultralytics/models/fastsam/predict.py/FastSAMPredictor/__init__
class FastSAMPredictor:
def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
"""Initializes a FastSAMPredictor for fast SAM segmentation tasks in Ultralytics YOLO framework."""
super().__init__(cfg, overrides, _callbacks)
self.prompts = {}
ultralytics/models/fastsam/predict.py/FastSAMPredictor/postprocess
class FastSAMPredictor:
def postprocess(self, preds, img, orig_imgs):
"""Applies box postprocess for FastSAM predictions."""
bboxes = self.prompts.pop("bboxes", None)
points = self.prompts.pop("points", None)
labels = self.prompts.pop("labels", None)
texts = self.prompts.pop("texts", None)
results = super().postprocess(preds, img, orig_imgs)
for result in results:
full_box = torch.tensor(
[0, 0, result.orig_shape[1], result.orig_shape[0]], device=preds[0].device, dtype=torch.float32
)
boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
if idx.numel() != 0:
result.boxes.xyxy[idx] = full_box
return self.prompt(results, bboxes=bboxes, points=points, labels=labels, texts=texts)
ultralytics/models/fastsam/predict.py/FastSAMPredictor/prompt
class FastSAMPredictor:
def prompt(self, results, bboxes=None, points=None, labels=None, texts=None):
"""
Internal function for image segmentation inference based on cues like bounding boxes, points, and masks.
Leverages SAM's specialized architecture for prompt-based, real-time segmentation.
Args:
results (Results | List[Results]): The original inference results from FastSAM models without any prompts.
bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.
points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixels.
labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 = foreground, 0 = background.
texts (str | List[str], optional): Textual prompts, a list contains string objects.
Returns:
(List[Results]): The output results determined by prompts.
"""
if bboxes is None and points is None and texts is None:
return results
prompt_results = []
if not isinstance(results, list):
results = [results]
for result in results:
masks = result.masks.data
if masks.shape[1:] != result.orig_shape:
masks = scale_masks(masks[None], result.orig_shape)[0]
# bboxes prompt
idx = torch.zeros(len(result), dtype=torch.bool, device=self.device)
if bboxes is not None:
bboxes = torch.as_tensor(bboxes, dtype=torch.int32, device=self.device)
bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
bbox_areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
mask_areas = torch.stack([masks[:, b[1] : b[3], b[0] : b[2]].sum(dim=(1, 2)) for b in bboxes])
full_mask_areas = torch.sum(masks, dim=(1, 2))
union = bbox_areas[:, None] + full_mask_areas - mask_areas
idx[torch.argmax(mask_areas / union, dim=1)] = True
if points is not None:
points = torch.as_tensor(points, dtype=torch.int32, device=self.device)
points = points[None] if points.ndim == 1 else points
if labels is None:
labels = torch.ones(points.shape[0])
labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
assert len(labels) == len(points), f"Expected `labels` to have the same size as `points`, but got {len(labels)} and {len(points)}"
point_idx = (
torch.ones(len(result), dtype=torch.bool, device=self.device)
if labels.sum() == 0 # all negative points
else torch.zeros(len(result), dtype=torch.bool, device=self.device)
)
for point, label in zip(points, labels):
point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = bool(label)
idx |= point_idx
if texts is not None:
if isinstance(texts, str):
texts = [texts]
crop_ims, filter_idx = [], []
for i, b in enumerate(result.boxes.xyxy.tolist()):
x1, y1, x2, y2 = (int(x) for x in b)
if masks[i].sum() <= 100:
filter_idx.append(i)
continue
crop_ims.append(Image.fromarray(result.orig_img[y1:y2, x1:x2, ::-1]))
similarity = self._clip_inference(crop_ims, texts)
text_idx = torch.argmax(similarity, dim=-1) # (M, )
if len(filter_idx):
text_idx += (torch.tensor(filter_idx, device=self.device)[None] <= int(text_idx)).sum(0)
idx[text_idx] = True
prompt_results.append(result[idx])
return prompt_results
ultralytics/models/fastsam/predict.py/FastSAMPredictor/_clip_inference
class FastSAMPredictor:
def _clip_inference(self, images, texts):
"""
CLIP Inference process.
Args:
images (List[PIL.Image]): A list of source images and each of them should be PIL.Image type with RGB channel order.
texts (List[str]): A list of prompt texts and each of them should be string object.
Returns:
(torch.Tensor): The similarity between given images and texts.
"""
try:
import clip
except ImportError:
checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
import clip
if (not hasattr(self, "clip_model")) or (not hasattr(self, "clip_preprocess")):
self.clip_model, self.clip_preprocess = clip.load("ViT-B/32", device=self.device)
images = torch.stack([self.clip_preprocess(image).to(self.device) for image in images])
tokenized_text = clip.tokenize(texts).to(self.device)
image_features = self.clip_model.encode_image(images)
text_features = self.clip_model.encode_text(tokenized_text)
image_features /= image_features.norm(dim=-1, keepdim=True) # (N, 512)
text_features /= text_features.norm(dim=-1, keepdim=True) # (M, 512)
return (image_features * text_features[:, None]).sum(-1)
ultralytics/models/fastsam/predict.py/FastSAMPredictor/set_prompts
class FastSAMPredictor:
def set_prompts(self, prompts):
"""Set prompts in advance."""
self.prompts = prompts
ultralytics/models/fastsam/utils.py/adjust_bboxes_to_image_border
def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20):
"""
Adjust bounding boxes to stick to image border if they are within a certain threshold.
Args:
boxes (torch.Tensor): (n, 4)
image_shape (tuple): (height, width)
threshold (int): pixel threshold
Returns:
adjusted_boxes (torch.Tensor): adjusted bounding boxes
"""
# Image dimensions
h, w = image_shape
# Adjust boxes
boxes[boxes[:, 0] < threshold, 0] = 0 # x1
boxes[boxes[:, 1] < threshold, 1] = 0 # y1
boxes[boxes[:, 2] > w - threshold, 2] = w # x2
boxes[boxes[:, 3] > h - threshold, 3] = h # y2
return boxes
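A quick usage sketch of the function above (hypothetical boxes): a box within 20 px of the top-left border snaps to the image edge, while an interior box is untouched.

import torch

boxes = torch.tensor([[5.0, 5.0, 100.0, 100.0], [50.0, 50.0, 200.0, 200.0]])
adjusted = adjust_bboxes_to_image_border(boxes.clone(), image_shape=(480, 640), threshold=20)
print(adjusted[0])  # tensor([  0.,   0., 100., 100.])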
ultralytics/trackers/track.py/on_predict_start
def on_predict_start(predictor: object, persist: bool = False) -> None:
"""
Initialize trackers for object tracking during prediction.
Args:
predictor (object): The predictor object to initialize trackers for.
persist (bool): Whether to persist the trackers if they already exist.
Raises:
AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
Examples:
Initialize trackers for a predictor object:
>>> predictor = SomePredictorClass()
>>> on_predict_start(predictor, persist=True)
"""
if hasattr(predictor, "trackers") and persist:
return
tracker = check_yaml(predictor.args.tracker)
cfg = IterableSimpleNamespace(**yaml_load(tracker))
if cfg.tracker_type not in {"bytetrack", "botsort"}:
raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'")
trackers = []
for _ in range(predictor.dataset.bs):
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
trackers.append(tracker)
if predictor.dataset.mode != "stream": # only need one tracker for other modes.
break
predictor.trackers = trackers
predictor.vid_path = [None] * predictor.dataset.bs
ultralytics/trackers/track.py/on_predict_postprocess_end
def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None:
"""
Postprocess detected boxes and update with object tracking.
Args:
predictor (object): The predictor object containing the predictions.
persist (bool): Whether to persist the trackers if they already exist.
Examples:
Postprocess predictions and update with tracking
>>> predictor = YourPredictorClass()
>>> on_predict_postprocess_end(predictor, persist=True)
"""
path, im0s = predictor.batch[:2]
is_obb = predictor.args.task == "obb"
is_stream = predictor.dataset.mode == "stream"
for i in range(len(im0s)):
tracker = predictor.trackers[i if is_stream else 0]
vid_path = predictor.save_dir / Path(path[i]).name
if not persist and predictor.vid_path[i if is_stream else 0] != vid_path:
tracker.reset()
predictor.vid_path[i if is_stream else 0] = vid_path
det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
if len(det) == 0:
continue
tracks = tracker.update(det, im0s[i])
if len(tracks) == 0:
continue
idx = tracks[:, -1].astype(int)
predictor.results[i] = predictor.results[i][idx]
update_args = {"obb" if is_obb else "boxes": torch.as_tensor(tracks[:, :-1])}
predictor.results[i].update(**update_args)
ultralytics/trackers/track.py/register_tracker
def register_tracker(model: object, persist: bool) -> None:
"""
Register tracking callbacks to the model for object tracking during prediction.
Args:
model (object): The model object to register tracking callbacks for.
persist (bool): Whether to persist the trackers if they already exist.
Examples:
Register tracking callbacks to a YOLO model
>>> model = YOLOModel()
>>> register_tracker(model, persist=True)
"""
model.add_callback("on_predict_start", partial(on_predict_start, persist=persist))
model.add_callback("on_predict_postprocess_end", partial(on_predict_postprocess_end, persist=persist))
ultralytics/trackers/byte_tracker.py/STrack/__init__
class STrack:
def __init__(self, xywh, score, cls):
"""
Initialize a new STrack instance.
Args:
xywh (List[float]): Bounding box coordinates and dimensions in the format (x, y, w, h, [a], idx), where
(x, y) is the center, (w, h) are width and height, [a] is optional aspect ratio, and idx is the id.
score (float): Confidence score of the detection.
cls (Any): Class label for the detected object.
Examples:
>>> xywh = [100.0, 150.0, 50.0, 75.0, 1]
>>> score = 0.9
>>> cls = "person"
>>> track = STrack(xywh, score, cls)
"""
super().__init__()
# xywh+idx or xywha+idx
assert len(xywh) in {5, 6}, f"expected 5 or 6 values but got {len(xywh)}"
self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.score = score
self.tracklet_len = 0
self.cls = cls
self.idx = xywh[-1]
self.angle = xywh[4] if len(xywh) == 6 else None
ultralytics/trackers/byte_tracker.py/STrack/predict
class STrack:
def predict(self):
"""Predicts the next state (mean and covariance) of the object using the Kalman filter."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
ultralytics/trackers/byte_tracker.py/STrack/multi_predict
class STrack:
def multi_predict(stracks):
"""Perform multi-object predictive tracking using Kalman filter for the provided list of STrack instances."""
if len(stracks) <= 0:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
ultralytics/trackers/byte_tracker.py/STrack/multi_gmc
class STrack:
def multi_gmc(stracks, H=np.eye(2, 3)):
"""Update state tracks positions and covariances using a homography matrix for multiple tracks."""
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
R = H[:2, :2]
R8x8 = np.kron(np.eye(4, dtype=float), R)
t = H[:2, 2]
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
mean = R8x8.dot(mean)
mean[:2] += t
cov = R8x8.dot(cov).dot(R8x8.transpose())
stracks[i].mean = mean
stracks[i].covariance = cov
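A quick illustration of the np.kron expansion above: it builds a block-diagonal matrix that applies the same 2x2 rotation R to each (x, y)-like pair of the 8-dimensional Kalman state.

import numpy as np

theta = np.deg2rad(10)
R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
R8x8 = np.kron(np.eye(4), R)  # R repeated 4 times along the diagonal
print(R8x8.shape)  # (8, 8)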
ultralytics/trackers/byte_tracker.py/STrack/activate
class STrack:
def activate(self, kalman_filter, frame_id):
"""Activate a new tracklet using the provided Kalman filter and initialize its state and covariance."""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
if frame_id == 1:
self.is_activated = True
self.frame_id = frame_id
self.start_frame = frame_id
ultralytics/trackers/byte_tracker.py/STrack/re_activate
class STrack:
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a previously lost track using new detection data and updates its state and attributes."""
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.convert_coords(new_track.tlwh)
)
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
self.score = new_track.score
self.cls = new_track.cls
self.angle = new_track.angle
self.idx = new_track.idx
ultralytics/trackers/byte_tracker.py/STrack/update
class STrack:
def update(self, new_track, frame_id):
"""
Update the state of a matched track.
Args:
new_track (STrack): The new track containing updated information.
frame_id (int): The ID of the current frame.
Examples:
Update the state of a track with new detection information
>>> track = STrack([100, 200, 50, 80, 0.9, 1])
>>> new_track = STrack([105, 205, 55, 85, 0.95, 1])
>>> track.update(new_track, 2)
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(
self.mean, self.covariance, self.convert_coords(new_tlwh)
)
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
self.cls = new_track.cls
self.angle = new_track.angle
self.idx = new_track.idx
ultralytics/trackers/byte_tracker.py/STrack/convert_coords
class STrack:
def convert_coords(self, tlwh):
"""Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
return self.tlwh_to_xyah(tlwh)
ultralytics/trackers/byte_tracker.py/STrack/tlwh
class STrack:
def tlwh(self):
"""Returns the bounding box in top-left-width-height format from the current state estimate."""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
ultralytics/trackers/byte_tracker.py/STrack/xyxy
class STrack:
def xyxy(self):
"""Converts bounding box from (top left x, top left y, width, height) to (min x, min y, max x, max y) format."""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
ultralytics/trackers/byte_tracker.py/STrack/tlwh_to_xyah
class STrack:
def tlwh_to_xyah(tlwh):
"""Convert bounding box from tlwh format to center-x-center-y-aspect-height (xyah) format."""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
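A numeric sketch of the conversion above (hypothetical box): top-left xywh becomes center x, center y, aspect ratio (w/h), height.

import numpy as np

tlwh = np.array([100.0, 50.0, 40.0, 80.0])
ret = tlwh.copy()
ret[:2] += ret[2:] / 2  # top-left to center: (120, 90)
ret[2] /= ret[3]  # width to aspect ratio: 0.5
print(ret)  # [120.   90.    0.5  80. ]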
ultralytics/trackers/byte_tracker.py/STrack/xywh
class STrack:
def xywh(self):
"""Returns the current position of the bounding box in (center x, center y, width, height) format."""
ret = np.asarray(self.tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
ultralytics/trackers/byte_tracker.py/STrack/xywha
class STrack:
def xywha(self):
"""Returns position in (center x, center y, width, height, angle) format, warning if angle is missing."""
if self.angle is None:
LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
return self.xywh
return np.concatenate([self.xywh, self.angle[None]])
ultralytics/trackers/byte_tracker.py/STrack/result
class STrack:
def result(self):
"""Returns the current tracking results in the appropriate bounding box format."""
coords = self.xyxy if self.angle is None else self.xywha
return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]
ultralytics/trackers/byte_tracker.py/STrack/__repr__
class STrack:
def __repr__(self):
"""Returns a string representation of the STrack object including start frame, end frame, and track ID."""
return f"OT_{self.track_id}_({self.start_frame}-{self.end_frame})"
ultralytics/trackers/byte_tracker.py/BYTETracker/__init__
class BYTETracker:
def __init__(self, args, frame_rate=30):
"""
Initialize a BYTETracker instance for object tracking.
Args:
args (Namespace): Command-line arguments containing tracking parameters.
frame_rate (int): Frame rate of the video sequence.
Examples:
Initialize BYTETracker with command-line arguments and a frame rate of 30
>>> args = Namespace(track_buffer=30)
>>> tracker = BYTETracker(args, frame_rate=30)
"""
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.args = args
self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)
self.kalman_filter = self.get_kalmanfilter()
self.reset_id()
ultralytics/trackers/byte_tracker.py/BYTETracker/update
class BYTETracker:
def update(self, results, img=None):
"""Updates the tracker with new detections and returns the current list of tracked objects."""
self.frame_id += 1
activated_stracks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
scores = results.conf
bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
# Add index
bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
cls = results.cls
remain_inds = scores >= self.args.track_high_thresh
inds_low = scores > self.args.track_low_thresh
inds_high = scores < self.args.track_high_thresh
inds_second = inds_low & inds_high
dets_second = bboxes[inds_second]
dets = bboxes[remain_inds]
scores_keep = scores[remain_inds]
scores_second = scores[inds_second]
cls_keep = cls[remain_inds]
cls_second = cls[inds_second]
detections = self.init_track(dets, scores_keep, cls_keep, img)
# Add newly detected tracklets to tracked_stracks
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
# Step 2: First association, with high score detection boxes
strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
self.multi_predict(strack_pool)
if hasattr(self, "gmc") and img is not None:
warp = self.gmc.apply(img, dets)
STrack.multi_gmc(strack_pool, warp)
STrack.multi_gmc(unconfirmed, warp)
dists = self.get_dists(strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
# Step 3: Second association, with low score detection boxes association the untrack to the low score detections
detections_second = self.init_track(dets_second, scores_second, cls_second, img)
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
# TODO
dists = matching.iou_distance(r_tracked_stracks, detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if track.state != TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
# Deal with unconfirmed tracks, usually tracks with only one beginning frame
detections = [detections[i] for i in u_detection]
dists = self.get_dists(unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_stracks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
# Step 4: Init new stracks
for inew in u_detection:
track = detections[inew]
if track.score < self.args.new_track_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_stracks.append(track)
# Step 5: Update state
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)
self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
self.removed_stracks.extend(removed_stracks)
if len(self.removed_stracks) > 1000:
self.removed_stracks = self.removed_stracks[-999:]  # keep only the most recent 999 removed stracks to bound memory
return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
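A minimal end-to-end usage sketch for the update loop above. This is hedged: the argument fields mirror the names read by this method and the tracker constructor (in practice they come from a tracker YAML config), and the detector can be any model whose results expose .conf, .xywh, and .cls.

from types import SimpleNamespace

from ultralytics import YOLO
from ultralytics.trackers.byte_tracker import BYTETracker

# Assumed argument namespace; field names mirror those read in update() and __init__()
args = SimpleNamespace(
    track_high_thresh=0.5,
    track_low_thresh=0.1,
    new_track_thresh=0.6,
    match_thresh=0.8,
    track_buffer=30,
    fuse_score=True,
)
tracker = BYTETracker(args, frame_rate=30)

model = YOLO("yolo11n.pt")  # any Ultralytics detection model
for result in model.predict("video.mp4", stream=True):
    boxes = result.boxes.cpu().numpy()  # exposes .conf, .xywh, .cls as read above
    tracks = tracker.update(boxes, result.orig_img)
    # tracks is an (N, 8) float32 array per activated track: xyxy, track_id, score, cls, detection index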
|
negative_train_query659_01646
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/get_kalmanfilter
class BYTETracker:
def get_kalmanfilter(self):
"""Returns a Kalman filter object for tracking bounding boxes using KalmanFilterXYAH."""
return KalmanFilterXYAH()
|
negative_train_query659_01647
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/init_track
class BYTETracker:
def init_track(self, dets, scores, cls, img=None):
"""Initializes object tracking with given detections, scores, and class labels using the STrack algorithm."""
return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else []
|
negative_train_query659_01648
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/get_dists
class BYTETracker:
def get_dists(self, tracks, detections):
"""Calculates the distance between tracks and detections using IoU and optionally fuses scores."""
dists = matching.iou_distance(tracks, detections)
if self.args.fuse_score:
dists = matching.fuse_score(dists, detections)
return dists
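For intuition, the score fusion converts the IoU cost back to a similarity, scales it by each detection's confidence, and converts back to a cost. A minimal re-implementation sketch under those assumptions (not the library's matching.fuse_score itself):

import numpy as np

def fuse_score_sketch(cost_matrix, det_scores):
    """Weight IoU similarity by detection confidence, then return the fused cost."""
    if cost_matrix.size == 0:
        return cost_matrix
    iou_sim = 1.0 - cost_matrix            # cost -> similarity
    fused = iou_sim * det_scores[None, :]  # broadcast one score per detection column
    return 1.0 - fused                     # similarity -> cost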
|
negative_train_query659_01649
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/multi_predict
class BYTETracker:
def multi_predict(self, tracks):
"""Predict the next states for multiple tracks using Kalman filter."""
STrack.multi_predict(tracks)
|
negative_train_query659_01650
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/reset_id
class BYTETracker:
@staticmethod
def reset_id():
"""Resets the ID counter for STrack instances to ensure unique track IDs across tracking sessions."""
STrack.reset_id()
|
negative_train_query659_01651
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/reset
class BYTETracker:
def reset(self):
"""Resets the tracker by clearing all tracked, lost, and removed tracks and reinitializing the Kalman filter."""
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.kalman_filter = self.get_kalmanfilter()
self.reset_id()
|
negative_train_query659_01652
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/joint_stracks
class BYTETracker:
@staticmethod
def joint_stracks(tlista, tlistb):
"""Combines two lists of STrack objects into a single list, ensuring no duplicates based on track IDs."""
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
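A quick illustration with stand-in objects (only a track_id attribute is needed here; assumes the @staticmethod decorator as above):

from types import SimpleNamespace

a = [SimpleNamespace(track_id=1), SimpleNamespace(track_id=2)]
b = [SimpleNamespace(track_id=2), SimpleNamespace(track_id=3)]
merged = BYTETracker.joint_stracks(a, b)
print([t.track_id for t in merged])  # [1, 2, 3] -- the duplicate id 2 from b is dropped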
|
negative_train_query659_01653
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/sub_stracks
class BYTETracker:
@staticmethod
def sub_stracks(tlista, tlistb):
"""Filters out the stracks present in the second list from the first list."""
track_ids_b = {t.track_id for t in tlistb}
return [t for t in tlista if t.track_id not in track_ids_b]
|
negative_train_query659_01654
|
|
ultralytics/trackers/byte_tracker.py/BYTETracker/remove_duplicate_stracks
class BYTETracker:
@staticmethod
def remove_duplicate_stracks(stracksa, stracksb):
"""Removes duplicate stracks from two lists based on Intersection over Union (IoU) distance."""
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
dupa, dupb = [], []
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i, t in enumerate(stracksa) if i not in dupa]
resb = [t for i, t in enumerate(stracksb) if i not in dupb]
return resa, resb
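In other words, a pair is treated as a duplicate when its IoU distance is below 0.15 (IoU above 0.85), and the tie is broken by track age: the track that has been alive longer (larger frame_id - start_frame) is kept, while its younger counterpart is dropped from the other list.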
|
negative_train_query659_01655
|
|
ultralytics/trackers/bot_sort.py/BOTrack/__init__
class BOTrack:
def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
"""
Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
Args:
tlwh (np.ndarray): Bounding box coordinates in tlwh format (top left x, top left y, width, height).
score (float): Confidence score of the detection.
cls (int): Class ID of the detected object.
feat (np.ndarray | None): Feature vector associated with the detection.
feat_history (int): Maximum length of the feature history deque.
Examples:
Initialize a BOTrack object with bounding box, score, class ID, and feature vector
>>> tlwh = np.array([100, 50, 80, 120])
>>> score = 0.9
>>> cls = 1
>>> feat = np.random.rand(128)
>>> bo_track = BOTrack(tlwh, score, cls, feat)
"""
super().__init__(tlwh, score, cls)
self.smooth_feat = None
self.curr_feat = None
if feat is not None:
self.update_features(feat)
self.features = deque([], maxlen=feat_history)
self.alpha = 0.9
|
negative_train_query659_01656
|
|
ultralytics/trackers/bot_sort.py/BOTrack/update_features
class BOTrack:
def update_features(self, feat):
"""Update the feature vector and apply exponential moving average smoothing."""
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
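A short worked example of the moving average above: with alpha = 0.9 the smoothed feature moves only 10% of the way toward each new observation (the vectors below are illustrative):

import numpy as np

alpha = 0.9
smooth = np.array([1.0, 0.0])  # previous smoothed feature (unit length)
new = np.array([0.0, 1.0])     # new observation (unit length)
smooth = alpha * smooth + (1 - alpha) * new  # -> [0.9, 0.1]
smooth /= np.linalg.norm(smooth)             # re-normalize to unit length
print(smooth)  # approx [0.9939, 0.1104]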
|
negative_train_query659_01657
|
|
ultralytics/trackers/bot_sort.py/BOTrack/predict
class BOTrack:
def predict(self):
"""Predicts the object's future state using the Kalman filter to update its mean and covariance."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[6] = 0
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
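In the xywh Kalman filter that BOTrack uses, indices 6 and 7 of the state mean are the width and height velocities, so zeroing them freezes the predicted box size (but not its position) while the track is not actively matched.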
|
negative_train_query659_01658
|
|
ultralytics/trackers/bot_sort.py/BOTrack/re_activate
class BOTrack:
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a track with updated features and optionally assigns a new ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().re_activate(new_track, frame_id, new_id)
|
negative_train_query659_01659
|
|
ultralytics/trackers/bot_sort.py/BOTrack/update
class BOTrack:
def update(self, new_track, frame_id):
"""Updates the YOLOv8 instance with new track information and the current frame ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().update(new_track, frame_id)
|
negative_train_query659_01660
|
|
ultralytics/trackers/bot_sort.py/BOTrack/tlwh
class BOTrack:
@property
def tlwh(self):
"""Returns the current bounding box position in `(top left x, top left y, width, height)` format."""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[:2] -= ret[2:] / 2
return ret
|
negative_train_query659_01661
|
|
ultralytics/trackers/bot_sort.py/BOTrack/multi_predict
class BOTrack:
@staticmethod
def multi_predict(stracks):
"""Predicts the mean and covariance for multiple object tracks using a shared Kalman filter."""
if not stracks:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][6] = 0
multi_mean[i][7] = 0
multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
|
negative_train_query659_01662
|
|
ultralytics/trackers/bot_sort.py/BOTrack/convert_coords
class BOTrack:
def convert_coords(self, tlwh):
"""Converts tlwh bounding box coordinates to xywh format."""
return self.tlwh_to_xywh(tlwh)
|
negative_train_query659_01663
|
|
ultralytics/trackers/bot_sort.py/BOTrack/tlwh_to_xywh
class BOTrack:
@staticmethod
def tlwh_to_xywh(tlwh):
"""Convert bounding box from tlwh (top-left-width-height) to xywh (center-x-center-y-width-height) format."""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
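For example, an 80x120 box whose top-left corner sits at (100, 50) has its center at (140, 110); assuming the @staticmethod decorator as above:

import numpy as np

print(BOTrack.tlwh_to_xywh(np.array([100.0, 50.0, 80.0, 120.0])))  # [140. 110.  80. 120.]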
|
negative_train_query659_01664
|
|
ultralytics/trackers/bot_sort.py/BOTSORT/__init__
class BOTSORT:
def __init__(self, args, frame_rate=30):
"""
Initialize a BOTSORT tracker with ReID module and GMC algorithm.
Args:
args (object): Parsed command-line arguments containing tracking parameters.
frame_rate (int): Frame rate of the video being processed.
Examples:
Initialize BOTSORT with command-line arguments and a specified frame rate:
>>> args = parse_args()
>>> bot_sort = BOTSORT(args, frame_rate=30)
"""
super().__init__(args, frame_rate)
# ReID module
self.proximity_thresh = args.proximity_thresh
self.appearance_thresh = args.appearance_thresh
if args.with_reid:
# ReID for BoT-SORT is not yet supported
self.encoder = None
self.gmc = GMC(method=args.gmc_method)
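A minimal construction sketch; the field values below are assumptions standing in for the parsed tracker YAML config:

from types import SimpleNamespace

from ultralytics.trackers.bot_sort import BOTSORT

args = SimpleNamespace(
    # BYTETracker base-class fields
    track_high_thresh=0.5,
    track_low_thresh=0.1,
    new_track_thresh=0.6,
    match_thresh=0.8,
    track_buffer=30,
    fuse_score=True,
    # BOTSORT-specific fields read above
    proximity_thresh=0.5,
    appearance_thresh=0.25,
    with_reid=False,
    gmc_method="sparseOptFlow",
)
tracker = BOTSORT(args, frame_rate=30)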
|
negative_train_query659_01665
|